#! /bin/sh
# This is a shell archive. Remove anything before this line, then unpack
# it by saving it into a file and typing "sh file". To overwrite existing
# files, type "sh file -c". You can also feed this as standard input via
# unshar.
#
# NOTE(review): the original extraction preamble for 'README' was garbled
# in transit -- the "if test -f 'README' ..." guard and the "sed" line had
# been fused into the header comment, leaving the heredoc unopened and the
# trailing "fi" unmatched. Reconstructed below from the identical stanzas
# used for every other member of this archive (e.g. the 'fortran.sh' one).
#
# Skip extraction if 'README' already exists, unless '-c' (clobber) given.
if test -f 'README' -a "${1}" != "-c" ; then
  echo shar: Will not clobber existing file \"'README'\"
else
echo shar: Extracting \"'README'\" \(488 characters\)
# sed strips the leading 'X' guard character from every payload line.
sed "s/^X//" >'README' <<'END_OF_FILE'
XFortran, Matlab and Splus code for performing
Xunivariate Partial Least Squares Regression.
X------------------------------------------------
XThis software may be freely used for non-commercial purposes and can
Xbe freely distributed.
X
XPlease send bug reports to:
XMike Denham snsdenhm@reading.ac.uk
X+44 734 318 914 (FAX) +44 734 753169
XDepartment of Applied Statistics, Harry Pitt Building
XUniversity of Reading, Whiteknights Road, PO Box 240
XReading RG6 2FN, United Kingdom.
END_OF_FILE
# Advisory integrity check: warns on size mismatch but does not abort.
# NOTE(review): payload lines may have been lost in the same corruption,
# so this check can fire; the recorded count (488) is kept as-is rather
# than guessed at.
if test 488 -ne `wc -c <'README'`; then
echo shar: \"'README'\" unpacked with wrong size!
fi
# end of 'README'
fi
# Guarded extraction of 'fortran.sh' -- itself a nested shell archive
# holding the Makefile and the Fortran PLS routines. Skip if the file
# already exists, unless '-c' (clobber) was passed as the first argument.
if test -f 'fortran.sh' -a "${1}" != "-c" ; then
echo shar: Will not clobber existing file \"'fortran.sh'\"
else
echo shar: Extracting \"'fortran.sh'\" \(40696 characters\)
# 's/^X//' strips the leading 'X' guard from each payload line; the
# heredoc body that follows is archive data and must stay byte-identical
# (the unpacker verifies it with a 'wc -c' size check after END_OF_FILE).
sed "s/^X//" >'fortran.sh' <<'END_OF_FILE'
X#! /bin/sh
X# This is a shell archive. Remove anything before this line, then unpack
X# it by saving it into a file and typing "sh file". To overwrite existing
X# files, type "sh file -c". You can also feed this as standard input via
X# unshar, or by typing "sh 'Makefile' <<'END_OF_FILE'
XXall: pls.a
XX ranlib pls.a
XXpls.a: dtrans.o svdpls1a.o svdpls1b.o svdpls1c.o pls1a.o pls1b.o pls1c.o
XX ar rcv pls.a dtrans.o svdpls1a.o svdpls1b.o svdpls1c.o pls1a.o\
XX pls1b.o pls1c.o
XXdtrans.o: dtrans.f
XX f77 -c -O3 dtrans.f
XXsvdpls1a.o: svdpls1a.f
XX f77 -c -O3 svdpls1a.f
XXsvdpls1b.o: svdpls1b.f
XX f77 -c -O3 svdpls1b.f
XXsvdpls1c.o: svdpls1c.f
XX f77 -c -O3 svdpls1c.f
XXpls1a.o: pls1a.f
XX f77 -c -O3 pls1a.f
XXpls1b.o: pls1b.f
XX f77 -c -O3 pls1b.f
XXpls1c.o: pls1c.f
XX f77 -c -O3 pls1c.f
XX
XX
XEND_OF_FILE
Xif test 476 -ne `wc -c <'Makefile'`; then
X echo shar: \"'Makefile'\" unpacked with wrong size!
Xfi
X# end of 'Makefile'
Xfi
Xif test -f 'README' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'README'\"
Xelse
Xecho shar: Extracting \"'README'\" \(1162 characters\)
Xsed "s/^X//" >'README' <<'END_OF_FILE'
XXFortran subroutines for performing Univariate
XXPartial Least Squares Regression
XX---------------------------------------------
XXThe enclosed Makefile will create a random library
XXof subroutines containing:
XXdtrans: subroutine for transposing a matrix
XXpls1a: PLS1 regression using Orthogonal Scores Algorithm
XXpls1b: PLS1 regression using Orthogonal Loadings Algorithm
XXpls1c: PLS1 regression using a modified Helland Algorithm
XXsvdpls1a: PLS1 regression using Singular Value Decompostion
XX and Orthogonal Scores Algorithm
XXsvdpls1b: PLS1 regression using Singular Value Decomposition
XX and Orthogonal Loadings Algorithm
XXsvdpls1c: PLS1 regression using Singular Value Decomposition
XX and a modified Helland Algorithm
XX
XXThe subroutines assume that the BLAS, BLAS2, BLAS3 and LINPACK
XXlibraries are already available on the machine. If this is not the
XXcase these must be obtained from netlib or statlib.
XX
XXPlease send bug reports to:
XXMike Denham snsdenhm@reading.ac.uk
XX+44 734 318 914 (FAX) +44 734 753169
XXDepartment of Applied Statistics, Harry Pitt Building
XXUniversity of Reading, Whiteknights Road, PO Box 240
XXReading RG6 2FN, United Kingdom.
XX
XX
XEND_OF_FILE
Xif test 1162 -ne `wc -c <'README'`; then
X echo shar: \"'README'\" unpacked with wrong size!
Xfi
X# end of 'README'
Xfi
Xif test -f 'dtrans.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'dtrans.f'\"
Xelse
Xecho shar: Extracting \"'dtrans.f'\" \(858 characters\)
Xsed "s/^X//" >'dtrans.f' <<'END_OF_FILE'
XX SUBROUTINE dtrans(dx,n,m,dxt)
XXC***BEGIN PROLOGUE dtrans
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 930925 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Transposes a double precision matrix
XXC
XXC On Entry
XXC dx DOUBLE PRECISION(n,m)
XXC contains the matrix dx
XXC
XXC n INTEGER
XXC n is the number of rows of x
XXC
XXC m INTEGER
XXC m is the number of columns of x
XXC
XXC On Return
XXC
XXC dxt DOUBLE PRECISION(m,n)
XXC dxt contains the matrix transpose of x
XXC
XXC***END PROLOGUE dtrans
XX double precision dx(n,m),dxt(m,n)
XX integer n,m,i,j
XX
XX do 10 i=1,n
XX do 20 j=1,m
XX dxt(j,i)=dx(i,j)
XX 20 continue
XX 10 continue
XX return
XX end
XX
XX
XEND_OF_FILE
Xif test 858 -ne `wc -c <'dtrans.f'`; then
X echo shar: \"'dtrans.f'\" unpacked with wrong size!
Xfi
X# end of 'dtrans.f'
Xfi
Xif test -f 'pls1a.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1a.f'\"
Xelse
Xecho shar: Extracting \"'pls1a.f'\" \(7518 characters\)
Xsed "s/^X//" >'pls1a.f' <<'END_OF_FILE'
XX SUBROUTINE pls1a(x,y,n,k,a,w,p,t,b,work,q,ipvt)
XXC***BEGIN PROLOGUE pls1a
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931023 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Performs univariate partial least squares regression using
XXC the orthogonal scores algorithm
XXC***DESCRIPTION
XXC
XXC pls1a performs the partial least squares regression of a double
XXC precision vector y on a double precision matrix x. It is assumed
XXC that both the vector y and the matrix x have been centred
XXC before entry into the subroutine, i.e. the vector y and each of the
XXC columns of x should sum to zero.
XXC
XXC References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XXC Chichester: John Wiley & Sons
XXC
XXC Denham, M. C. (1994) Implementing Partial Least Squares.
XXC Statistics and Computing, (to appear).
XXC
XXC On Entry
XXC
XXC x DOUBLE PRECISION(n,k)
XXC contains the centred matrix x.
XXC
XXC y DOUBLE PRECISION(n)
XXC contains the centred vector y.
XXC
XXC n INTEGER
XXC n is the number of rows of the matrix x.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y0 on the matrix x.
XXC
XX
XXC On Return
XXC
XXC x x contains the residual information not accounted for by the
XXC a partial least squares factors.
XXC
XXC y y contains the residuals of an a-factor partial
XXC least squares regression of y on x.
XXC
XXC w DOUBLE PRECISION(k,a)
XXC w contains the coefficient vectors stored by column of
XXC the a PLS regression factors obtained from the matrix x.
XXC
XXC p DOUBLE PRECISION(k,a)
XXC p contains the matrix of least squares regression
XXC coefficients from regressing the matrix x on the PLS
XXC factors t.
XXC
XXC t DOUBLE PRECISION(n,a)
XXC t contains the a PLS regression factors stored by column.
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y on x.
XXC
XXC work DOUBLE PRECISION(a,a)
XXC work is workspace required by the subroutine CALCB.
XXC
XXC q DOUBLE PRECISION(a)
XXC q contains the least squares regression coefficients of
XXC the ordinary least squares regression of y on the PLS
XXC factor matrix t.
XXC
XXC ipvt INTEGER(a)
XXC ipvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DGEFA and DGESL called from
XXC the subroutine CALCB.
XXC
XXC pls1a uses the following functions and subprograms
XXC
XXC BLAS DAXPY,DCOPY,DDOT,DNRM2,DSCAL
XXC BLAS2 DGEMV,DGER
XXC
XXC References: Lawson, C., Hanson, R., Kincaid, D. and Krogh, F. (1979).
XXC Basic Linear Algebra Subprograms for Fortran Usage.
XXC ACM Transactions on Mathematical Software, 5, 308-325.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An Extended Set of Fortran Basic Linear
XXC Algebra Subprograms. ACM Transactions on Mathematical
XXC Software, 14, 1-32.
XXC
XXC***END PROLOGUE pls1a
XX double precision x(n,k),y(n),w(k,a),p(k,a),t(n),b(k),
XX + work(a,a),q(a),c
XX
XX integer n,k,a,ipvt(a),i
XX
XX double precision dnrm2,ddot
XX
XX do 10 i=1,a
XX call dgemv('T',n,k,1.d0,x,n,y,1,0.d0,w(1,i),1)
XX c=dnrm2(k,w(1,i),1)
XX c=1/c
XX call dscal(k,c,w(1,i),1)
XX call dgemv('N',n,k,1.d0,x,n,w(1,i),1,0.d0,t,1)
XX c=dnrm2(n,t,1)
XX c=c*c
XX call dgemv('T',n,k,1/c,x,n,t,1,0.d0,p(1,i),1)
XX q(i)=ddot(n,y,1,t,1)
XX q(i)=q(i)/c
XX call dger(n,k,-1.d0,t,1,p(1,i),1,x,n)
XX call daxpy(n,-q(i),t,1,y,1)
XX10 continue
XX
XX call calcb(w,p,q,k,a,b,work,ipvt)
XX return
XX end
XX
XX SUBROUTINE calcb(w,p,q,k,a,b,work,ipvt)
XXC***BEGIN PROLOGUE calcb
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931023 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE calculates the coefficient vector of the PLS regression.
XXC
XXC***DESCRIPTION
XXC
XXC calcb calculates the PLS regression coefficients of the vector y on
XXC the matrix x from auxiliary information provided by its calling
XXC routine pls1a. This subroutine should only be called from pls1a.
XXC
XXC References: Denham, M. C. (1993) Implementing Partial Least Squares.
XXC Statistics and Computing, (subject to Revision).
XXC
XXC On Entry
XXC
XXC w DOUBLE PRECISION(k,a)
XXC w contains the coefficient vectors stored by column of
XXC the a PLS regression factors obtained from the matrix x.
XXC
XXC p DOUBLE PRECISION(k,a)
XXC p contains the matrix of least squares regression
XXC coefficients from regressing the matrix x on the PLS
XXC factors.
XXC
XXC q DOUBLE PRECISION(a)
XXC q contains the least squares regression coefficients of
XXC the ordinary least squares regression of y on the PLS
XXC factor matrix.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y0 on the matrix x.
XXC
XXC On Return
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y on x.
XXC
XXC work DOUBLE PRECISION(a,a)
XXC work is workspace required by the subroutine calcb.
XXC
XXC ipvt INTEGER(a)
XXC ipvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DGEFA and DGESL conatined in
XXC the subroutine calcb.
XXC
XXC calcb uses the following functions and subprograms
XXC
XXC BLAS2 DGEMV
XXC BLAS3 DGEMM
XXC LINPACK DGEFA,DGESL
XXC References: Dongarra, J. J., Bunch, J. R., Moler, C. B. and
XXC Stewart, G. W. (1979). LINPACK User's Guide.
XXC Philadelphia: SIAM Publications.
XXC
XXC Dongarra, J. J.,DuCroz, J., Duff, I. and Hammarling, S.
XXC (1988). A Set of Level 3 Basic Linear Algebra
XXC Subprograms. ACM Transactions on Mathematical Software,
XXC 16, 1-28.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An Extended Set of Fortran Basic Linear
XXC Algebra Subprograms. ACM Transactions on Mathematical
XXC Software, 14, 1-32.
XXC
XXC***END PROLOGUE calcb
XX double precision w(k,a),p(k,a),q(a),b(k),work(a,a)
XX
XX integer k,a,ipvt(a),info
XX
XX call dgemm('T','N',a,a,k,1.d0,p,k,w,k,0.d0,work,a)
XX call dgefa(work,a,a,ipvt,info)
XX call dgesl(work,a,a,ipvt,q,0)
XX call dgemv('N',k,a,1.d0,w,k,q,1,0.d0,b,1)
XX return
XX end
XEND_OF_FILE
Xif test 7518 -ne `wc -c <'pls1a.f'`; then
X echo shar: \"'pls1a.f'\" unpacked with wrong size!
Xfi
X# end of 'pls1a.f'
Xfi
Xif test -f 'pls1b.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1b.f'\"
Xelse
Xecho shar: Extracting \"'pls1b.f'\" \(4976 characters\)
Xsed "s/^X//" >'pls1b.f' <<'END_OF_FILE'
XX SUBROUTINE pls1b(x,y0,y,n,k,a,w,t,qrt,b,qraux,q,jpvt,dum)
XXC***BEGIN PROLOGUE pls1b
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931023 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Performs univariate partial least squares regression using
XXC the orthogonal loadings algorithm
XXC***DESCRIPTION
XXC
XXC pls1b performs the partial least squares regression of a double
XXC precision vector y0 on a double precision matrix x. It is assumed
XXC that both the vector y0 and the matrix x have been centred
XXC before entry into the subroutine, i.e. the vector y0 and each of the
XXC columns of x should sum to zero.
XXC
XXC References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XXC Chichester: John Wiley & Sons
XXC
XXC Denham, M. C. (1994) Implementing Partial Least Squares.
XXC Statistics and Computing, (to appear).
XXC
XXC On Entry
XXC
XXC x DOUBLE PRECISION(n,k)
XXC contains the centred matrix x.
XXC
XXC y0 DOUBLE PRECISION(n)
XXC contains the centred vector y0.
XXC
XXC n INTEGER
XXC n is the number of rows of the matrix x.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y0 on the matrix x.
XXC
XX
XXC On Return
XXC
XXC x x contains the residual information not accounted for by the
XXC a partial least squares factors i.e. X = X - T*Transpose(W)
XXC
XXC y DOUBLE PRECISION(n)
XXC y contains the residuals of an a-factor partial
XXC least squares regression of y0 on x.
XXC
XXC w DOUBLE PRECISION(k,a)
XXC w contains the coefficient vectors stored by column of
XXC the a PLS regression factors obtained from the matrix x.
XXC
XXC t DOUBLE PRECISION(n,a)
XXC t contains the a PLS regression factors stored by column.
XXC
XXC qrt DOUBLE PRECISION(n,a)
XXC qrt is workspace used for passing the partial least
XXC squares factors matrix t to the LINPACK subroutines
XXC DQRDC and DQRSL without destroying the matrix t. On
XXC exit from DQRDC, qrt contains the QR decomposition of
XXC the matrix t.
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y0 on x.
XXC
XXC qraux DOUBLE PRECISION(a)
XXC qraux contains auxiliary output from the LINPACK
XXC subroutine DQRDC.
XXC
XXC q DOUBLE PRECISION(a)
XXC q contains the least squares regression coefficients of
XXC the ordinary least squares regression of y0 on the PLS
XXC factor matrix t.
XXC
XXC jpvt INTEGER(a)
XXC jpvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DQRDC and DQRSL.
XXC
XXC dum DOUBLE PRECISION(n)
XXC dum is workspace used by the LINPACK subroutine DQRSL.
XXC
XXC pls1b uses the following functions and subprograms
XXC
XXC BLAS DCOPY,DNRM2,DSCAL
XXC BLAS2 DGEMV,DGER
XXC LINPACK DQRDC,DQRSL
XXC References: Dongarra, J. J., Bunch, J. R., Moler, C. B. and
XXC Stewart, G. W. (1979). LINPACK User's Guide.
XXC Philadelphia: SIAM Publications.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An extend set of Fortran basic linear
XXC algebra subprograms. ACM Transactions on Mathematical
XXC Software, 14, 1-32.
XXC
XXC Lawson, C., Hanson, R., Kincaid, D. and Krogh, F. (1979).
XXC A Set of Basic Linear Algebra Subprograms for Fortran Usage.
XXC ACM Transaction on Mathematical Software, 5, 308-325.
XXC
XXC***END PROLOGUE pls1b
XX double precision x(n,k),y0(n),y(n),w(k,a),t(n,a),
XX + b(k),qraux(a),q(a),c,dum(n),qrt(n,a)
XX
XX integer n,k,a,jpvt(a),i,j
XX
XX double precision dnrm2
XX
XX call dcopy(n,y0,1,y,1)
XX do 10 i=1,a
XX call dgemv('T',n,k,1.d0,x,n,y,1,0.d0,w(1,i),1)
XX c=dnrm2(k,w(1,i),1)
XX c=1.d0/c
XX call dscal(k,c,w(1,i),1)
XX call dgemv('N',n,k,1.d0,x,n,w(1,i),1,0.d0,t(1,i),1)
XX call dcopy(n*i,t(1,1),1,qrt(1,1),1)
XX call dqrdc(qrt,n,n,i,qraux,jpvt,dum,0)
XX call dqrsl(qrt,n,n,i,qraux,y0,dum,dum,q,dum,dum,00100,j)
XX call dger(n,k,-1.d0,t(1,i),1,w(1,i),1,x,n)
XX call dcopy(n,y0,1,y,1)
XX call dgemv('N',n,i,-1.d0,t(1,1),n,q,1,1.d0,y,1)
XX10 continue
XX
XX call dgemv('N',k,a,1.d0,w(1,1),k,q,1,0.d0,b,1)
XX return
XX end
XX
XEND_OF_FILE
Xif test 4976 -ne `wc -c <'pls1b.f'`; then
X echo shar: \"'pls1b.f'\" unpacked with wrong size!
Xfi
X# end of 'pls1b.f'
Xfi
Xif test -f 'pls1c.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1c.f'\"
Xelse
Xecho shar: Extracting \"'pls1c.f'\" \(5035 characters\)
Xsed "s/^X//" >'pls1c.f' <<'END_OF_FILE'
XX SUBROUTINE pls1c(x,y,rsd,n,k,a,w,t,qrt,b,qraux,q,jpvt,dum)
XXC***BEGIN PROLOGUE pls1c
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931022 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Performs univariate partial least squares regression using
XXC a modification of an algorithm by Helland(1988).
XXC***DESCRIPTION
XXC
XXC pls1c performs the partial least squares regression of a double
XXC precision vector y on a double precision matrix x. It is assumed
XXC that both the vector y and the matrix x have been centred
XXC before entry into the subroutine, i.e. the vector y and each of the
XXC columns of x should sum to zero.
XXC
XXC This is a modified version of an algorithm for PLS1 regression given
XXC in Helland (1988)
XXC
XXC References: Helland, I. S. (1988) On the structure of partial least
XXC squares regression. Communications in Statistics ---
XXC Elements of Simulation and Computation, 17, 581-607.
XXC
XXC Denham, M. C. (1994) Implementing Partial Least Squares.
XXC Statistics and Computing, (to appear).
XXC
XXC On Entry
XXC
XXC x DOUBLE PRECISION(n,k)
XXC contains the centred matrix x.
XXC
XXC y DOUBLE PRECISION(n)
XXC contains the centred vector y.
XXC
XXC n INTEGER
XXC n is the number of rows of the matrix x.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y on the matrix x.
XXC
XX
XXC On Return
XXC
XXC rsd DOUBLE PRECISION(n)
XXC rsd contains the residuals of an a-factor partial
XXC least squares regression of y on x.
XXC
XXC w DOUBLE PRECISION(k,a)
XXC w contains the coefficient vectors stored by column of
XXC the a PLS regression factors obtained from the matrix x.
XXC
XXC t DOUBLE PRECISION(n,a)
XXC t contains the a PLS regression factors stored by column,
XXC so that t = xw.
XXC
XXC qrt DOUBLE PRECISION(n,a)
XXC qrt is workspace used for passing the partial least
XXC squares factors matrix t to the LINPACK subroutines
XXC DQRDC and DQRSL without destroying the matrix t. On
XXC exit from DQRDC, qrt contains the QR decomposition of
XXC the matrix t.
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y on x.
XXC
XXC qraux DOUBLE PRECISION(a)
XXC qraux contains auxiliary output from the LINPACK
XXC subroutine DQRDC.
XXC
XXC q DOUBLE PRECISION(a)
XXC q contains the least squares regression coefficients of
XXC the ordinary least squares regression of y on the PLS
XXC factor matrix t.
XXC
XXC jpvt INTEGER(a)
XXC jpvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DQRDC and DQRSL.
XXC
XXC dum DOUBLE PRECISION(n)
XXC dum is workspace used by the LINPACK subroutine DQRSL.
XXC
XXC pls1c uses the following functions and subprograms
XXC
XXC BLAS DCOPY
XXC BLAS2 DGEMV
XXC LINPACK DQRDC,DQRSL
XXC References: Dongarra, J. J., Bunch, J. R., Moler, C. B. and
XXC Stewart, G. W. (1979). LINPACK User's Guide.
XXC Philadelphia: SIAM Publications.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An extend set of Fortran basic linear
XXC algebra subprograms. ACM Transactions on Mathematical
XXC Software, 14, 1-32.
XXC
XXC Lawson, C., Hanson, R., Kincaid, D. and Krogh, F. (1979).
XXC A Set of Basic Linear Algebra Subprograms for Fortran Usage.
XXC ACM Transactions on Mathematical Software, 5, 308-325.
XXC
XXC***END PROLOGUE pls1c
XX double precision x(n,k),y(n),w(k,a),t(n,a),b(k),qraux(a),
XX + q(a),dum(n),qrt(n,a),rsd(n)
XX
XX integer n,k,a,jpvt(a),i,j
XX
XX call dgemv('T',n,k,1.d0,x,n,y,1,0.d0,w(1,1),1)
XX call dgemv('N',n,k,1.d0,x,n,w(1,1),1,0.d0,t(1,1),1)
XX call dcopy(n,t(1,1),1,qrt(1,1),1)
XX
XX call dqrdc(qrt,n,n,1,qraux,jpvt,dum,0)
XX call dqrsl(qrt,n,n,1,qraux,y,dum,dum,q,rsd,dum,00110,j)
XX
XX do 10 i=2,a
XX call dgemv('T',n,k,1.d0,x,n,rsd,1,0.d0,w(1,i),1)
XX call dgemv('N',n,k,1.d0,x,n,w(1,i),1,0.d0,t(1,i),1)
XX call dcopy(n*i,t(1,1),1,qrt(1,1),1)
XX call dqrdc(qrt,n,n,i,qraux,jpvt,dum,0)
XX call dqrsl(qrt,n,n,i,qraux,y,dum,dum,q,rsd,dum,00110,j)
XX10 continue
XX
XX call dgemv('N',k,a,1.d0,w(1,1),k,q,1,0.d0,b,1)
XX return
XX end
XEND_OF_FILE
Xif test 5035 -ne `wc -c <'pls1c.f'`; then
X echo shar: \"'pls1c.f'\" unpacked with wrong size!
Xfi
X# end of 'pls1c.f'
Xfi
Xif test -f 'svdpls1a.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'svdpls1a.f'\"
Xelse
Xecho shar: Extracting \"'svdpls1a.f'\" \(5198 characters\)
Xsed "s/^X//" >'svdpls1a.f' <<'END_OF_FILE'
XX SUBROUTINE svdpls1a(x,y,n,k,a,w,p,t,b,work,q,ipvt,xt,e)
XXC***BEGIN PROLOGUE svdpls1a
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931023 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Performs univariate partial least squares regression using
XXC the orthogonal scores algorithm with possible prior
XXC orthogonalisation using the singular value decomposition
XXC algorithm.
XXC***DESCRIPTION
XXC
XXC svdpls1a performs the partial least squares regression of a double
XXC precision vector y on a double precision matrix x. It is assumed
XXC that both the vector y and the matrix x have been centred
XXC before entry into the subroutine, i.e. the vector y and each of the
XXC columns of x should sum to zero. If the number of columns of x is less
XXC than the number of rows the subroutine PLS1A is called. If the number
XXC of columns is greater than or equal to the number of rows then a
XXC canonical form for the regression is created using the singular value
XXC decomposition of Transpose(x) which is obtained by the subroutine
XXC DSVDC. The orthogonal scores algorithm PLS1A is applied to this canonical
XXC form and the resulting coefficient vector is tranformed back into
XXC the coefficient vector for the original pls regression of y on x.
XXC
XXC References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XXC Chichester: John Wiley & Sons
XXC
XXC Denham, M. C. (1994) Implementing Partial Least Squares.
XXC Statistics and Computing, (to appear).
XXC
XXC On Entry
XXC x DOUBLE PRECISION(n,k)
XXC contains the centred matrix x.
XXC
XXC y DOUBLE PRECISION(n)
XXC contains the centred vector y.
XXC
XXC n INTEGER
XXC n is the number of rows of the matrix x.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y0 on the matrix x.
XXC
XXC
XXC On Return
XXC
XXC x if k < n x contains the residual information not accounted
XXC for by the a partial least squares factors, otherwise x
XXC is overwriten as workspace.
XXC
XXC y if k < n y contains the residuals of an a-factor partial
XXC least squares regression of y on x, otherwise y is unchanged.
XXC
XXC w DOUBLE PRECISION(k,a)
XXC if k < n w contains the coefficient vectors stored by
XXC column of the a PLS regression factors obtained from the
XXC matrix x.
XXC
XXC p DOUBLE PRECISION(k,a)
XXC if k < n p contains the matrix of least squares regression
XXC coefficients from regressing the matrix x on the PLS
XXC factors t.
XXC
XXC t DOUBLE PRECISION(n,a)
XXC if k < n t contains the a PLS regression factors stored
XXC by column.
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y on x.
XXC
XXC work DOUBLE PRECISION(a,a)
XXC work is workspace required by the subroutine CALCB.
XXC
XXC q DOUBLE PRECISION(a)
XXC if k < n q contains the least squares regression
XXC coefficients of the ordinary least squares regression of
XXC y on the PLS factor matrix t.
XXC
XXC ipvt INTEGER(a)
XXC ipvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DGEFA and DGESL called via PLS1A
XXC
XXC svdpls1a uses the following functions and subprograms
XXC
XXC DTRANS
XXC PLS1A
XXC BLAS DAXPY,DCOPY,DDOT,DNRM2,DSCAL
XXC BLAS2 DGEMV,DGER
XXC LINPACK DSVDC
XXC
XXC References: Dongarra, J. J.,DuCroz, J., Duff, I. and Hammarling, S.
XXC (1988). An set of level 3 basic linear algebra
XXC subprograms. Technical Report. Agronne National Laboratory,
XXC 9700 South Cass Avenue, Argonne, Illinois 60439.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An extend set of Fortran basic linear
XXC algebra subprograms. ACM Transactions in Mathematical
XXC Software, 14, 1-32.
XXC
XXC***END PROLOGUE svdpls1a
XX double precision x(n,k),y(n),w(k,a),p(k,a),t(n),b(k),
XX + work(a,a),q(a),xt(k,n),e(n)
XX
XX integer rank,info
XX
XX
XX rank=min(n-1,k)
XX
XX if (rank .eq. k) then
XX call pls1a(x,y,n,k,a,w,p,t,b,work,q,ipvt)
XX else
XX call dtrans(x,n,k,xt)
XX call dsvdc(xt,k,k,n,t,e,xt,k,x,n,b,21,info)
XX call dgemv('T',n,rank,1.d0,x,n,y,1,0.d0,e,1)
XX call dcopy(rank*rank,0.d0,0,x,1)
XX call dcopy(rank,t,1,x,rank+1)
XX call pls1a(x,e,rank,rank,a,w,p,b,t,work,q,ipvt)
XX call dgemv('N',k,rank,1.d0,xt,k,t,1,0.d0,b,1)
XX endif
XX return
XX end
XX
XX
XX
XEND_OF_FILE
Xif test 5198 -ne `wc -c <'svdpls1a.f'`; then
X echo shar: \"'svdpls1a.f'\" unpacked with wrong size!
Xfi
X# end of 'svdpls1a.f'
Xfi
Xif test -f 'svdpls1b.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'svdpls1b.f'\"
Xelse
Xecho shar: Extracting \"'svdpls1b.f'\" \(5320 characters\)
Xsed "s/^X//" >'svdpls1b.f' <<'END_OF_FILE'
XX SUBROUTINE svdpls1b(x,y0,y,n,k,a,w,t,qrt,b,qraux,q,jpvt,dum,xt,e)
XXC***BEGIN PROLOGUE svdpls1b
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931023 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Performs univariate partial least squares regression using
XXC the orthogonal loadings algorithm with possible prior
XXC orthogonalisation using the singular value decomposition
XXC algorithm.
XXC***DESCRIPTION
XXC
XXC svdpls1b performs the partial least squares regression of a double
XXC precision vector y0 on a double precision matrix x. It is assumed
XXC that both the vector y0 and the matrix x have been centred
XXC before entry into the subroutine, i.e. the vector y0 and each of the
XXC columns of x should sum to zero. If the number of columns of x is less
XXC than the number of rows the subroutine PLS1B is called. If the number
XXC of columns is greater than or equal to the number of rows then a
XXC canonical form for the regression is created using the singular value
XXC decomposition of Transpose(x) which is obtained by the subroutine
XXC DSVDC. The orthogonal scores algorithm PLS1B is applied to this canonical
XXC form and the resulting coefficient vector is tranformed back into
XXC the coefficient vector for the original pls regression of y on x.
XXC
XXC References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XXC Chichester: John Wiley & Sons
XXC
XXC Denham, M. C. (1994) Implementing Partial Least Squares.
XXC Statistics and Computing, (to appear).
XXC
XXC On Entry
XXC x DOUBLE PRECISION(n,k)
XXC contains the centred matrix x.
XXC
XXC y0 DOUBLE PRECISION(n)
XXC contains the centred vector y0.
XXC
XXC n INTEGER
XXC n is the number of rows of the matrix x.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y0 on the matrix x.
XXC
XXC
XXC On Return
XXC
XXC x if k < n x contains the residual information not accounted
XXC for by the a partial least squares factors, otherwise x
XXC is overwriten as workspace.
XXC
XXC y if k < n y contains the residuals of an a-factor partial
XXC least squares regression of y0 on x, otherwise y is
XXC unchanged.
XXC
XXC w DOUBLE PRECISION(k,a)
XXC if k < n w contains the coefficient vectors stored by
XXC column of the a PLS regression factors obtained from the
XXC matrix x.
XXC
XXC t DOUBLE PRECISION(n,a)
XXC if k < n t contains the a PLS regression factors stored
XXC by column.
XXC
XXC qrt DOUBLE PRECISION(n,a)
XXC qrt is workspace used for passing the matrix t to the
XXC LINPACK subroutines DQRDC and DQRSl without destroying t.
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y0 on x.
XXC
XXC qraux DOUBLE PRECISION(a)
XXC qraux contains auxiliary output from the LINPACK
XXC subroutine DQRDC.
XXC
XXC q DOUBLE PRECISION(a)
XXC if k < n q contains the least squares regression
XXC coefficients of the ordinary least squares regression of
XXC y on the PLS factor matrix t.
XXC
XXC jpvt INTEGER(a)
XXC jpvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DQRDC and DQRSL called via PLS1B
XXC
XXC svdpls1b uses the following functions and subprograms
XXC
XXC DTRANS
XXC PLS1B
XXC BLAS DCOPY
XXC BLAS2 DGEMV
XXC LINPACK DQRDC,DQRSL,DSVDC
XXC
XXC References: Dongarra, J. J.,DuCroz, J., Duff, I. and Hammarling, S.
XXC (1988). An set of level 3 basic linear algebra
XXC subprograms. Technical Report. Agronne National Laboratory,
XXC 9700 South Cass Avenue, Argonne, Illinois 60439.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An extend set of Fortran basic linear
XXC algebra subprograms. ACM Transactions in Mathematical
XXC Software, 14, 1-32.
XXC
XXC***END PROLOGUE svdpls1b
XX
XX double precision x(n,k),y0(n),y(n),w(k,a),t(n,a),
XX + b(k),qraux(a),q(a),dum(n),qrt(n,a),xt(k,n),
XX + e(n)
XX integer n,k,a,jpvt(a),rank,info
XX
XX rank=min(n-1,k)
XX
XX if (rank .eq. k) then
XX call pls1b(x,y0,y,n,k,a,w,t,qrt,b,qraux,q,jpvt,dum)
XX else
XX call dtrans(x,n,k,xt)
XX call dsvdc(xt,k,k,n,t,e,xt,k,x,n,b,21,info)
XX call dgemv('T',n,rank,1.d0,x,n,y,1,0.d0,e,1)
XX call dcopy(rank*rank,0.d0,0,x,1)
XX call dcopy(rank,t,1,x,rank+1)
XX call dcopy(rank,e,1,y,1)
XX call pls1b(x,e,y,rank,rank,a,w,t,qrt,y0,qraux,q,jpvt,dum)
XX call dgemv('N',k,rank,1.d0,xt,k,y0,1,0.d0,b,1)
XX endif
XX return
XX end
XX
XEND_OF_FILE
Xif test 5320 -ne `wc -c <'svdpls1b.f'`; then
X echo shar: \"'svdpls1b.f'\" unpacked with wrong size!
Xfi
X# end of 'svdpls1b.f'
Xfi
Xif test -f 'svdpls1c.f' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'svdpls1c.f'\"
Xelse
Xecho shar: Extracting \"'svdpls1c.f'\" \(5468 characters\)
Xsed "s/^X//" >'svdpls1c.f' <<'END_OF_FILE'
XX subroutine svdpls1c(x,y,rsd,n,k,a,w,t,qrt,b,qraux,q,jpvt,dum,xt,e)
XXC***BEGIN PROLOGUE svdpls1c
XXC***DATE WRITTEN 930925 (YYMMDD)
XXC***REVISION DATE 931023 (YYMMDD)
XXC***AUTHOR DENHAM, M. C., (University of Reading)
XXC***EMAIL snsdenhm@reading.ac.uk
XXC***PURPOSE Performs univariate partial least squares regression using
XXC a modified version of an algorithm due to Helland(1988)
XXC with possible prior orthogonalisation using the singular value
XXC decomposition algorithm.
XXC***DESCRIPTION
XXC
XXC svdpls1c performs the partial least squares regression of a double
XXC precision vector y on a double precision matrix x. It is assumed
XXC that both the vector y and the matrix x have been centred
XXC before entry into the subroutine, i.e. the vector y and each of the
XXC columns of x should sum to zero. If the number of columns of x is less
XXC than the number of rows the subroutine PLS1C is called. If the number
XXC of columns is greater than or equal to the number of rows then a
XXC canonical form for the regression is created using the singular value
XXC decomposition of Transpose(x) which is obtained by the subroutine
XXC DSVDC. The orthogonal scores algorithm PLS1C is applied to this canonical
XXC form and the resulting coefficient vector is tranformed back into
XXC the coefficient vector for the original pls regression of y on x.
XXC
XXC References: Helland, I. S. (1988) On the structure of partial least
XXC squares regression. Communications in Statistics ---
XXC Elements of Simulation and Computation, 17, 581-607.
XXC
XXC Martens, H. and Naes, T. (1989) Multivariate Calibration
XXC Chichester: John Wiley & Sons
XXC
XXC Denham, M. C. (1994) Implementing Partial Least Squares.
XXC Statistics and Computing, (to appear).
XXC
XXC On Entry
XXC x DOUBLE PRECISION(n,k)
XXC contains the centred matrix x.
XXC
XXC y DOUBLE PRECISION(n)
XXC contains the centred vector y.
XXC
XXC n INTEGER
XXC n is the number of rows of the matrix x.
XXC
XXC k INTEGER
XXC k is the number of columns of the matrix x.
XXC
XXC a INTEGER a .LE. MIN(n-1,k)
XXC a is the number of PLS factors to include in the regression
XXC of y0 on the matrix x.
XXC
XXC
XXC On Return
XXC
XXC x if k < n x contains the residual information not accounted
XXC for by the a partial least squares factors, otherwise x
XXC is overwriten as workspace.
XXC
XXC rsd if k < n y contains the residuals of an a-factor partial
XXC least squares regression of y on x.
XXC
XXC w DOUBLE PRECISION(k,a)
XXC if k < n w contains the coefficient vectors stored by
XXC column of the a PLS regression factors obtained from the
XXC matrix x.
XXC
XXC t DOUBLE PRECISION(n,a)
XXC if k < n t contains the a PLS regression factors stored
XXC by column.
XXC
XXC qrt DOUBLE PRECISION(n,a)
XXC qrt is workspace used for passing the matrix t to the
XXC LINPACK subroutines DQRDC and DQRSl without destroying t.
XXC
XXC b DOUBLE PRECISION(k)
XXC b are the PLS regression coefficients of the a-factor
XXC PLS regression of y0 on x.
XXC
XXC qraux DOUBLE PRECISION(a)
XXC qraux contains auxiliary output from the LINPACK
XXC subroutine DQRDC.
XXC
XXC q DOUBLE PRECISION(a)
XXC if k < n q contains the least squares regression
XXC coefficients of the ordinary least squares regression of
XXC y on the PLS factor matrix t.
XXC
XXC jpvt INTEGER(a)
XXC jpvt contains integers that control the use of pivoting
XXC in the LINPACK subroutines DQRDC and DQRSL called via PLS1C
XXC
XXC svdpls1c uses the following functions and subprograms
XXC
XXC DTRANS
XXC PLS1C
XXC BLAS DCOPY
XXC BLAS2 DGEMV
XXC LINPACK DSVDC
XXC
XXC References: Dongarra, J. J.,DuCroz, J., Duff, I. and Hammarling, S.
XXC (1988). An set of level 3 basic linear algebra
XXC subprograms. Technical Report. Agronne National Laboratory,
XXC 9700 South Cass Avenue, Argonne, Illinois 60439.
XXC
XXC Dongarra, J. J., DuCroz, J., Hammarling, S. and
XXC Hanson, R. (1988). An extend set of Fortran basic linear
XXC algebra subprograms. ACM Transactions in Mathematical
XXC Software, 14, 1-32.
XXC
XXC***END PROLOGUE svdpls1c
XX
XX
XX double precision x(n,k),y(n),rsd(n),w(k,a),t(n,a),
XX + b(k),qraux(a),q(a),dum(n),qrt(n,a),xt(k,n),
XX + e(n)
XX integer n,k,a,jpvt(a),rank,info
XX
XX rank=min(n-1,k)
XX
XX if (rank .eq. k) then
XX call pls1c(x,y,rsd,n,k,a,w,t,qrt,b,qraux,q,jpvt,dum)
XX else
XX call dtrans(x,n,k,xt)
XX call dsvdc(xt,k,k,n,t,e,xt,k,x,n,b,21,info)
XX call dgemv('T',n,rank,1.d0,x,n,y,1,0.d0,e,1)
XX call dcopy(rank*rank,0.d0,0,x,1)
XX call dcopy(rank,t,1,x,rank+1)
XX call pls1c(x,e,rsd,rank,rank,a,w,t,qrt,y,qraux,q,jpvt,dum)
XX call dgemv('N',k,rank,1.d0,xt,k,y,1,0.d0,b,1)
XX endif
XX return
XX end
XX
XEND_OF_FILE
Xif test 5468 -ne `wc -c <'svdpls1c.f'`; then
X echo shar: \"'svdpls1c.f'\" unpacked with wrong size!
Xfi
X# end of 'svdpls1c.f'
Xfi
Xecho shar: End of shell archive.
Xexit 0
END_OF_FILE
if test 40696 -ne `wc -c <'fortran.sh'`; then
echo shar: \"'fortran.sh'\" unpacked with wrong size!
fi
# end of 'fortran.sh'
fi
if test -f 'matlab.sh' -a "${1}" != "-c" ; then
echo shar: Will not clobber existing file \"'matlab.sh'\"
else
echo shar: Extracting \"'matlab.sh'\" \(10948 characters\)
sed "s/^X//" >'matlab.sh' <<'END_OF_FILE'
X#! /bin/sh
X# This is a shell archive. Remove anything before this line, then unpack
X# it by saving it into a file and typing "sh file". To overwrite existing
X# files, type "sh file -c". You can also feed this as standard input via
X# unshar, or by typing "sh 'README' <<'END_OF_FILE'
XXMatlab functions for performing univariate
XXPartial Least Squares Regression.
XX------------------------------------------------
XXPlace files in a directory searched by Matlab
XX
XXPlease send bug reports to:
XXMike Denham snsdenhm@reading.ac.uk
XX+44 734 318 914 (FAX) +44 734 753169
XXDepartment of Applied Statistics, Harry Pitt Building
XXUniversity of Reading, Whiteknights Road, PO Box 240
XXReading RG6 2FN, United Kingdom.
XEND_OF_FILE
Xif test 427 -ne `wc -c <'README'`; then
X echo shar: \"'README'\" unpacked with wrong size!
Xfi
X# end of 'README'
Xfi
Xif test -f 'pls1a.m' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1a.m'\"
Xelse
Xecho shar: Extracting \"'pls1a.m'\" \(1120 characters\)
Xsed "s/^X//" >'pls1a.m' <<'END_OF_FILE'
XXfunction[b] = pls1a(X,y,K)
XX%PLS1A Univariate Partial Least Squares Regression.
XX% PLS1A finds the K factor PLS1 regression of the vector y on X
XX% using the Orthogonal Loadings Algorithm
XX%
XX% References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XX% Chichester: John Wiley & Sons
XX%
XX% Denham, M. C. (1994) Implementing Partial Least Squares.
XX% Statistics and Computing, (to appear).
XX%
XX% On entry:
XX% X should be a centred matrix of explanatory variables.
XX%
XX% y should be a centred vector of response variables whose
XX% length is equal to the number of rows of X.
XX%
XX% K is the number of PLS factors to be fitted in the PLS
XX% regression.
XX%
XX% See also: pls1b,pls1c,svdpls1a,svdpls1b,svdpls1c
XX%
XX% Copyright (c) 1994 M. C. Denham
XX% Email: snsdenhm@reading.ac.uk
XXsizex=size(X);
XXW=zeros(sizex(2),K);
XXP=zeros(sizex(2),K);
XXQ=zeros(K,1);
XXfor i = 1:K
XX w=X'*y;
XX w=w/sqrt(w'*w);
XX W(:,i)=w;
XX t=X*w;
XX c=t'*t;
XX p=X'*(t/c);
XX P(:,i)=p;
XX q=(y'*t)/c;
XX Q(i,1)=q;
XX X=X-t*p';
XX y=y-q*t;
XXend
XXb=W*((P'*W)\Q);
XEND_OF_FILE
Xif test 1120 -ne `wc -c <'pls1a.m'`; then
X echo shar: \"'pls1a.m'\" unpacked with wrong size!
Xfi
X# end of 'pls1a.m'
Xfi
Xif test -f 'pls1b.m' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1b.m'\"
Xelse
Xecho shar: Extracting \"'pls1b.m'\" \(1105 characters\)
Xsed "s/^X//" >'pls1b.m' <<'END_OF_FILE'
XXfunction[b] = pls1b(X,y,K)
XX%PLS1B Univariate Partial Least Squares Regression.
XX% PLS1B finds the K factor PLS1 regression of the vector y on X
XX% using the Orthogonal Loadings Algorithm
XX%
XX% References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XX% Chichester: John Wiley & Sons
XX%
XX% Denham, M. C. (1994) Implementing Partial Least Squares.
XX% Statistics and Computing, (to appear).
XX%
XX% On entry:
XX% X should be a centred matrix of explanatory variables.
XX%
XX% y should be a centred vector of response variables whose
XX% length is equal to the number of rows of X.
XX%
XX% K is the number of PLS factors to be fitted in the PLS
XX% regression.
XX%
XX% See also: pls1a,pls1c,svdpls1a,svdpls1b,svdpls1c
XX%
XX% Copyright (c) 1994 M. C. Denham
XX% Email: snsdenhm@reading.ac.uk
XX
XXsizex=size(X);
XXW=zeros(sizex(2),K);
XXT=zeros(sizex(1),K);
XXy0=y;
XXfor i = 1:K
XX w=X'*y;
XX w=w/sqrt(w'*w);
XX W(:,i)=w;
XX t=X*w;
XX T(:,i)=t;
XX Q=T(:,1:i)\y0;
XX X=X-t*w';
XX y=y0-T(:,1:i)*Q;
XXend
XXb=W*Q;
XX
XEND_OF_FILE
Xif test 1105 -ne `wc -c <'pls1b.m'`; then
X echo shar: \"'pls1b.m'\" unpacked with wrong size!
Xfi
X# end of 'pls1b.m'
Xfi
Xif test -f 'pls1c.m' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1c.m'\"
Xelse
Xecho shar: Extracting \"'pls1c.m'\" \(1295 characters\)
Xsed "s/^X//" >'pls1c.m' <<'END_OF_FILE'
XXfunction[b] = pls1c(X,y,K)
XX%PLS1C Univariate Partial Least Squares Regression.
XX% PLS1C finds the K factor PLS1 regression of the vector y on X
XX% using a modification of an algorithm by Helland(1988).
XX%
XX% References: Helland, I. S. (1988) On the structure of partial least
XX% squares regression. Communications in Statistics ---
XX% Elements of Simulation and Computation, 17, 581-607.
XX%
XX% Denham, M. C. (1994) Implementing Partial Least Squares.
XX% Statistics and Computing, (to appear).
XX%
XX% On entry:
XX% X should be a centred matrix of explanatory variables.
XX%
XX% y should be a centred vector of response variables whose
XX% length is equal to the number of rows of X.
XX%
XX% K is the number of PLS factors to be fitted in the PLS
XX% regression.
XX%
XX% See also: pls1a,pls1b,svdpls1a,svdpls1b,svdpls1c
XX%
XX% Copyright (c) 1994 M. C. Denham
XX% Email: snsdenhm@reading.ac.uk
XX
XXsizex=size(X);
XXW=zeros(sizex(2),K);
XXXW=zeros(sizex(1),K);
XXw = X'*y;
XXW(:,1) = w;
XXXW(:,1) = X*w;
XXb = XW(:,1)\y;
XXr = y - XW(:,1)*b;
XXif K > 1,
XX for i = 2:K
XX w=X'*r;
XX W(:,i)=w;
XX XW(:,i)=X*w;
XX b=XW(:,1:i)\y;
XX r=y-XW(:,1:i)*b;
XX end
XXend
XXb=W*b;
XEND_OF_FILE
Xif test 1295 -ne `wc -c <'pls1c.m'`; then
X echo shar: \"'pls1c.m'\" unpacked with wrong size!
Xfi
X# end of 'pls1c.m'
Xfi
Xif test -f 'svdpls1a.m' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'svdpls1a.m'\"
Xelse
Xecho shar: Extracting \"'svdpls1a.m'\" \(1199 characters\)
Xsed "s/^X//" >'svdpls1a.m' <<'END_OF_FILE'
XXfunction[b] = svdpls1a(X,y,K)
XX%SVDPLS1A Univariate Partial Least Squares Regression.
XX% SVDPLS1A finds the K factor PLS1 regression of the vector y
XX% on X using the Orthogonal Scores Algorithm. The problem is
XX% first converted into canonical form using the singular value
XX% decompostion. PLS1A is then applied to this canonical problem
XX% and the solution is transformed back to the original problem.
XX%
XX% References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XX% Chichester: John Wiley & Sons
XX%
XX% Denham, M. C. (1994) Implementing Partial Least Squares.
XX% Statistics and Computing, (to appear).
XX%
XX% On entry:
XX% X should be a centred matrix of explanatory variables.
XX%
XX% y should be a centred vector of response variables whose
XX% length is equal to the number of rows of X.
XX%
XX% K is the number of PLS factors to be fitted in the PLS
XX% regression.
XX%
XX% See also: pls1a,pls1b,pls1c,svdpls1b,svdpls1c
XX%
XX% Copyright (c) 1994 M. C. Denham
XX% Email: snsdenhm@reading.ac.uk
XX
XXsizex=size(X);
XXr=min(sizex(1)-1,sizex(2));
XX[v,s,u]=svd(X',0);
XXb=v(:,1:r)*pls1a(s(1:r,1:r),u(:,1:r)'*y,K);
XEND_OF_FILE
Xif test 1199 -ne `wc -c <'svdpls1a.m'`; then
X echo shar: \"'svdpls1a.m'\" unpacked with wrong size!
Xfi
X# end of 'svdpls1a.m'
Xfi
Xif test -f 'svdpls1b.m' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'svdpls1b.m'\"
Xelse
Xecho shar: Extracting \"'svdpls1b.m'\" \(1200 characters\)
Xsed "s/^X//" >'svdpls1b.m' <<'END_OF_FILE'
XXfunction[b] = svdpls1b(X,y,K)
XX%SVDPLS1B Univariate Partial Least Squares Regression.
XX% SVDPLS1B finds the K factor PLS1 regression of the vector y
XX% on X using the Orthogonal Loadings Algorithm. The problem is
XX% first converted into canonical form using the singular value
XX% decompostion. PLS1B is then applied to this canonical problem
XX% and the solution is transformed back to the original problem.
XX%
XX% References: Martens, H. and Naes, T. (1989) Multivariate Calibration
XX% Chichester: John Wiley & Sons
XX%
XX% Denham, M. C. (1994) Implementing Partial Least Squares.
XX% Statistics and Computing, (to appear).
XX%
XX% On entry:
XX% X should be a centred matrix of explanatory variables.
XX%
XX% y should be a centred vector of response variables whose
XX% length is equal to the number of rows of X.
XX%
XX% K is the number of PLS factors to be fitted in the PLS
XX% regression.
XX%
XX% See also: pls1a,pls1b,pls1c,svdpls1a,svdpls1c
XX%
XX% Copyright (c) 1994 M. C. Denham
XX% Email: snsdenhm@reading.ac.uk
XXsizex=size(X);
XXr=min(sizex(1)-1,sizex(2));
XX[v,s,u]=svd(X',0);
XXb=v(:,1:r)*pls1b(s(1:r,1:r),u(:,1:r)'*y,K);
XEND_OF_FILE
Xif test 1200 -ne `wc -c <'svdpls1b.m'`; then
X echo shar: \"'svdpls1b.m'\" unpacked with wrong size!
Xfi
X# end of 'svdpls1b.m'
Xfi
Xif test -f 'svdpls1c.m' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'svdpls1c.m'\"
Xelse
Xecho shar: Extracting \"'svdpls1c.m'\" \(1334 characters\)
Xsed "s/^X//" >'svdpls1c.m' <<'END_OF_FILE'
XXfunction[b] = svdpls1c(X,y,K)
XX%SVDPLS1C Univariate Partial Least Squares Regression.
XX% SVDPLS1C finds the K factor PLS1 regression of the vector y
XX% on X using a modification of an algorithm by Helland(1988).
XX% The problem is first converted into canonical form using the
XX% singular value decompostion. PLS1C is then applied to this
XX% canonical problem and the solution is transformed back to the
XX% original problem.
XX%
XX% References: Helland, I. S. (1988) On the structure of partial least
XX% squares regression. Communications in Statistics ---
XX% Elements of Simulation and Computation, 17, 581-607.
XX%
XX% Denham, M. C. (1994) Implementing Partial Least Squares.
XX% Statistics and Computing, (to appear).
XX%
XX% On entry:
XX% X should be a centred matrix of explanatory variables.
XX%
XX% y should be a centred vector of response variables whose
XX% length is equal to the number of rows of X.
XX%
XX% K is the number of PLS factors to be fitted in the PLS
XX% regression.
XX%
XX% See also: pls1a,pls1b,pls1c,svdpls1a,svdpls1b
XX%
XX% Copyright (c) 1994 M. C. Denham
XX% Email: snsdenhm@reading.ac.uk
XXsizex=size(X);
XXr=min(sizex(1)-1,sizex(2));
XX[v,s,u]=svd(X',0);
XXb=v(:,1:r)*pls1c(s(1:r,1:r),u(:,1:r)'*y,K);
XEND_OF_FILE
Xif test 1334 -ne `wc -c <'svdpls1c.m'`; then
X echo shar: \"'svdpls1c.m'\" unpacked with wrong size!
Xfi
X# end of 'svdpls1c.m'
Xfi
Xecho shar: End of shell archive.
Xexit 0
END_OF_FILE
if test 10948 -ne `wc -c <'matlab.sh'`; then
echo shar: \"'matlab.sh'\" unpacked with wrong size!
fi
# end of 'matlab.sh'
fi
if test -f 'splus.sh' -a "${1}" != "-c" ; then
echo shar: Will not clobber existing file \"'splus.sh'\"
else
echo shar: Extracting \"'splus.sh'\" \(28696 characters\)
sed "s/^X//" >'splus.sh' <<'END_OF_FILE'
X#! /bin/sh
X# This is a shell archive. Remove anything before this line, then unpack
X# it by saving it into a file and typing "sh file". To overwrite existing
X# files, type "sh file -c". You can also feed this as standard input via
X# unshar, or by typing "sh 'README' <<'END_OF_FILE'
XXSplus functions for performing univariate
XXPartial Least Squares Regression.
XX------------------------------------------------
XXTo install the software:
XX
XXsh pls.sh
XX# pick one of the following
XXS < pls1.s
XX#Splus < pls1.s
XX--------------------------
XX
XXPlease send bug reports to:
XXMike Denham snsdenhm@reading.ac.uk
XX+44 734 318 914 (FAX) +44 734 753169
XXDepartment of Applied Statistics, Harry Pitt Building
XXUniversity of Reading, Whiteknights Road, PO Box 240
XXReading RG6 2FN, United Kingdom.
XX
XX
XEND_OF_FILE
Xif test 500 -ne `wc -c <'README'`; then
X echo shar: \"'README'\" unpacked with wrong size!
Xfi
X# end of 'README'
Xfi
Xif test -f 'pls1.s' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'pls1.s'\"
Xelse
Xecho shar: Extracting \"'pls1.s'\" \(7478 characters\)
Xsed "s/^X//" >'pls1.s' <<'END_OF_FILE'
XX"pls1a"<-
XXfunction(X, y, K=min(dx[1]-1,dx[2]))
XX{
XX# Copyright (c) October 1993, Mike Denham.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Scores Algorithm for PLS (Martens and Naes, pp. 121--123)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX X <- as.matrix(X)
XX dx <- dim(X)
XX W <- matrix(0, dx[2], K)
XX P <- matrix(0, dx[2], K)
XX Q <- numeric(K)
XX for(i in 1:K) {
XX w <- crossprod(X, y)
XX w <- w/sqrt(crossprod(w)[1])
XX W[, i] <- w
XX tee <- X %*% w
XX cee <- crossprod(tee)[1]
XX p <- crossprod(X, (tee/cee))
XX P[, i] <- p
XX q <- crossprod(y, tee)[1]/cee
XX Q[i] <- q
XX X <- X - tee %*% t(p)
XX y <- y - q * tee
XX }
XX W %*% solve(crossprod(P, W), Q)
XX}
XX"pls1b"<-
XXfunction(X, y, K=min(dx[1]-1,dx[2]))
XX{
XX# Copyright Mike Denham, October 1993.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Loadings Algorithm for PLS (Martens and Naes, pp. 123--125)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX tol <- 1e-10
XX X <- as.matrix(X)
XX dx <- dim(X)
XX W <- matrix(0, dx[2], K)
XX Tee <- matrix(0, dx[1], K)
XX y0 <- y
XX for(i in 1:K) {
XX w <- crossprod(X, y)
XX w <- w/sqrt(crossprod(w)[1])
XX W[, i] <- w
XX tee <- X %*% w
XX Tee[, i] <- tee
XX Q <- qr.coef(qr(Tee[, 1:i], tol = tol), y0)
XX X <- X - tee %*% t(w)
XX y <- y0 - Tee[, 1:i, drop = F] %*% Q
XX }
XX W %*% Q
XX}
XX
XX"pls1c"<-
XXfunction(X, y, K=min(dx[1]-1,dx[2]))
XX{
XX# Copyright Mike Denham, October 1994.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Modified Helland Algorithm (Helland 1988 + Denham 1994)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX tol <- 1e-10
XX X <- as.matrix(X)
XX dx <- dim(X)
XX W <- matrix(0, dx[2], K)
XX XW <- matrix(0, dx[1], K)
XX s <- crossprod(X, y)
XX W[, 1] <- s
XX XW[, 1] <- X %*% s
XX QR <- qr(XW[, 1], tol = tol)
XX r <- qr.resid(QR, y)
XX if(K > 1) {
XX for(i in 2:K) {
XX w <- crossprod(X, r)
XX W[, i] <- w
XX XW[, i] <- X %*% w
XX QR <- qr(XW[, 1:i], tol = tol)
XX r <- qr.resid(QR, y)
XX }
XX }
XX W %*% qr.coef(QR, y)
XX}
XX"svdpls1a"<-
XXfunction(X, y, K = r)
XX{
XX# Copyright Mike Denham, October 1993.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Scores Algorithm for PLS (Martens and Naes, pp. 121--123)
XX# using Singular Value Decomposition. (Uses a replacement version of svd
XX# which is more efficient when the number of columns is large relative to
XX# the number of rows.)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX
XX X <- as.matrix(X)
XX r <- min(dim(X) - c(1, 0))
XX X <- my.svd(X)
XX X$v[, 1:r] %*% pls1a(diag(X$d[1:r]), crossprod(X$u[, 1:r], y), K)
XX}
XX"svdpls1b"<-
XXfunction(X, y, K = r)
XX{
XX# Copyright Mike Denham, October 1993.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Loadings Algorithm for PLS (Martens and Naes, pp. 123--125)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX
XX X <- as.matrix(X)
XX r <- min(dim(X) - c(1, 0))
XX X <- svd(X)
XX X$v[, 1:r] %*% pls1b(diag(X$d[1:r]), crossprod(X$u[, 1:r], y), K)
XX}
XX"svdpls1c"<-
XXfunction(X, y, K = r)
XX{
XX# Copyright Mike Denham, October 1994.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Modified Helland Algorithm (Helland 1988 + Denham 1994)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX
XX X <- as.matrix(X)
XX r <- min(dim(X) - c(1, 0))
XX X <- svd(X)
XX X$v[, 1:r] %*% pls1c(diag(X$d[1:r]), crossprod(X$u[, 1:r], y), K)
XX}
XX"my.svd"<-
XXfunction(x, nu = min(n, p), nv = min(n, p))
XX{
XX# Alternative to Singular Value Decomposition function svd
XX# Examines matrix n by p matrix x and if n < p obtains the svd
XX# by applying svd the transpose of x.
XX x <- as.matrix(x)
XX dmx <- dim(x)
XX n <- dmx[1]
XX p <- dmx[2]
XX transpose.x <- n < p
XX if(transpose.x) {
XX x <- t(x)
XX hold <- nu
XX nu <- nv
XX nv <- hold
XX }
XX cmplx <- mode(x) == "complex"
XX if(!(is.numeric(x) || cmplx))
XX stop("x must be numeric or complex")
XX if(!cmplx)
XX storage.mode(x) <- "double"
XX dmx <- dim(x)
XX n <- dmx[1]
XX p <- dmx[2]
XX mm <- min(n + 1, p)
XX mn <- min(dmx)
XX job <- (if(nv) 1 else 0) + 10 * (if(nu == 0) 0 else if(nu == mn)
XX 2
XX else if(nu == n)
XX 1
XX else stop("Invalid value for nu (must be 0, number of rows, or number of cols)"
XX ))
XX z <- .Fortran(if(!cmplx) "dsvdcs" else "zsvdcs",
XX x,
XX as.integer(n),
XX as.integer(n),
XX as.integer(p),
XX d = if(!cmplx) double(mm) else complex(mm),
XX if(!cmplx) double(p) else complex(p),
XX u = if(!cmplx) if(nu)
XX matrix(0, n, nu)
XX else 0 else if(nu)
XX matrix(as.complex(0), n, nu)
XX else as.complex(0),
XX as.integer(n),
XX v = if(!cmplx) if(nv)
XX matrix(0, p, p)
XX else 0 else if(nv)
XX matrix(as.complex(0), p, p)
XX else as.complex(0),
XX as.integer(p),
XX if(!cmplx) double(n) else complex(n),
XX as.integer(job),
XX info = integer(1))[c("d", "u", "v", "info")]
XX if(z$info)
XX stop(paste("Numerical error (code", z$info, ") in algorithm"))
XX if(cmplx) {
XX if(all(Im(z$d) == 0))
XX z$d <- Re(z$d)
XX else stop("a singular value has a nonzero imaginary part")
XX }
XX length(z$d) <- mn
XX if(nv && nv < p)
XX z$v <- z$v[, seq(nv)]
XX if(transpose.x) {
XX z <- z[c("d", if(nu) "u" else NULL, if(nv) "v" else NULL)]
XX names(z) <- names(z)[c(1, 3, 2)]
XX z
XX }
XX else {
XX z[c("d", if(nv) "v" else NULL, if(nu) "u" else NULL)]
XX }
XX}
XEND_OF_FILE
Xif test 7478 -ne `wc -c <'pls1.s'`; then
X echo shar: \"'pls1.s'\" unpacked with wrong size!
Xfi
X# end of 'pls1.s'
Xfi
Xif test ! -d '.Data' ; then
X echo shar: Creating directory \"'.Data'\"
X mkdir '.Data'
Xfi
Xif test ! -d '.Data/.Help' ; then
X echo shar: Creating directory \"'.Data/.Help'\"
X mkdir '.Data/.Help'
Xfi
Xif test -f '.Data/.Help/pls1a' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'.Data/.Help/pls1a'\"
Xelse
Xecho shar: Extracting \"'.Data/.Help/pls1a'\" \(2817 characters\)
Xsed "s/^X//" >'.Data/.Help/pls1a' <<'END_OF_FILE'
XX.BG
XX.FN pls1a
XX.TL
XXUnivariate Partial Least Squares Regression
XX.DN
XXPerforms univariate partial least squares (PLS) regression of a vector on a
XXmatrix of explanatory variables using the Orthogonal Scores Algorithm.
XX.CS
XXpls1a(X, y, K=min(dx[1]-1,dx[2]))
XX.RA
XX.AG X
XXMatrix of explanatory variables. Each column represents a variable and
XXeach row an observation. The columns of this matrix are assumed to have been
XXcentred. The number of rows of `X' should equal the number of observations in
XX`y'. `NA's and `Inf's are not allowed.
XX.AG y
XXVector of responses. y is assumed to have been centred.
XX`NA's and `Inf's are not allowed.
XX.OA
XX.AG K
XXNumber of PLS factors to fit in the PLS regression. This must
XXbe less than or equal to the rank of `X'.
XX.RT
XXa vector of regression coefficients
XX.DT
XXUnivariate Partial Least Squares Regression is an example of a
XXregularised regression method. It creates a lower dimensional
XXrepresentation of the original explanatory variables and uses this
XXrepresentation in an ordinary least squares regression of the response
XXvariables. (cf. Principal Components Regression). Unlike Principal
XXComponents Regression, PLS regression chooses the lower dimensional
XXrepresentation of the original explanatory variables with reference to
XXthe response variable `y'.
XX.SH REFERENCES
XXDenham, M. C. (1994).
XXImplementing partial least squares.
XXStatistics and Computing (to appear)
XX.sp
XXHelland, I. S. (1988).
XXOn the Structure of partial least squares regression,
XXCommunications in Statistics, 17, pp. 581-607
XX.sp
XXMartens, H. and Naes, T. (1989).
XXMultivariate Calibration.
XXWiley, New York.
XX.SA
XXpls1b,pls1c,svdpls1a,svdpls1b,svdpls1c
XX.EX
XX# The function is currently defined as
XXfunction(X, y, K=min(dx[1]-1,dx[2]))
XX{
XX# Copyright (c) October 1993, Mike Denham.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Scores Algorithm for PLS (Martens and Naes, pp. 121--123)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX X <- as.matrix(X)
XX dx <- dim(X)
XX W <- matrix(0, dx[2], K)
XX P <- matrix(0, dx[2], K)
XX Q <- numeric(K)
XX for(i in 1:K) {
XX w <- crossprod(X, y)
XX w <- w/sqrt(crossprod(w)[1])
XX W[, i] <- w
XX tee <- X %*% w
XX cee <- crossprod(tee)[1]
XX p <- crossprod(X, (tee/cee))
XX P[, i] <- p
XX q <- crossprod(y, tee)[1]/cee
XX Q[i] <- q
XX X <- X - tee %*% t(p)
XX y <- y - q * tee
XX }
XX W %*% solve(crossprod(P, W), Q)
XX}
XX.KW regression
XX.WR
XEND_OF_FILE
Xif test 2817 -ne `wc -c <'.Data/.Help/pls1a'`; then
X echo shar: \"'.Data/.Help/pls1a'\" unpacked with wrong size!
Xfi
X# end of '.Data/.Help/pls1a'
Xfi
Xif test -f '.Data/.Help/pls1b' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'.Data/.Help/pls1b'\"
Xelse
Xecho shar: Extracting \"'.Data/.Help/pls1b'\" \(2824 characters\)
Xsed "s/^X//" >'.Data/.Help/pls1b' <<'END_OF_FILE'
XX.BG
XX.FN pls1b
XX.TL
XXUnivariate Partial Least Squares Regression
XX.DN
XXPerforms univariate partial least squares (PLS) regression of a vector on a
XXmatrix of explanatory variables using the Orthogonal Loadings Algorithm.
XX.CS
XXpls1b(X, y, K=min(dx[1]-1,dx[2]))
XX.RA
XX.AG X
XXMatrix of explanatory variables. Each column represents a variable and
XXeach row an observation. The columns of this matrix are assumed to have been
XXcentred. The number of rows of `X' should equal the number of observations in
XX`y'. `NA's and `Inf's are not allowed.
XX.AG y
XXVector of responses. y is assumed to have been centred.
XX`NA's and `Inf's are not allowed.
XX.OA
XX.AG K
XXNumber of PLS factors to fit in the PLS regression. This must
XXbe less than or equal to the rank of `X'.
XX.RT
XXa vector of regression coefficients
XX.DT
XXUnivariate Partial Least Squares Regression is an example of a
XXregularised regression method. It creates a lower dimensional
XXrepresentation of the original explanatory variables and uses this
XXrepresentation in an ordinary least squares regression of the response
XXvariables. (cf. Principal Components Regression). Unlike Principal
XXComponents Regression, PLS regression chooses the lower dimensional
XXrepresentation of the original explanatory variables with reference to
XXthe response variable `y'.
XX.SH REFERENCES
XXDenham, M. C. (1994).
XXImplementing partial least squares.
XXStatistics and Computing (to appear)
XX.sp
XXHelland, I. S. (1988).
XXOn the Structure of partial least squares regression,
XXCommunications in Statistics, 17, pp. 581-607
XX.sp
XXMartens, H. and Naes, T. (1989).
XXMultivariate Calibration.
XXWiley, New York.
XX.SA
XXpls1a,pls1c,svdpls1a,svdpls1b,svdpls1c
XX.EX
XX# The function is currently defined as
XXfunction(X, y, K=min(dx[1]-1,dx[2]))
XX{
XX# Copyright Mike Denham, October 1993.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Loadings Algorithm for PLS (Martens and Naes, pp. 123--125)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX tol <- 1e-10
XX X <- as.matrix(X)
XX dx <- dim(X)
XX W <- matrix(0, dx[2], K)
XX Tee <- matrix(0, dx[1], K)
XX y0 <- y
XX for(i in 1:K) {
XX w <- crossprod(X, y)
XX w <- w/sqrt(crossprod(w)[1])
XX W[, i] <- w
XX tee <- X %*% w
XX Tee[, i] <- tee
XX Q <- qr.coef(qr(Tee[, 1:i], tol = tol), y0)
XX X <- X - tee %*% t(w)
XX y <- y0 - Tee[, 1:i, drop = F] %*% Q
XX }
XX W %*% Q
XX}
XX.KW regression
XX.WR
XEND_OF_FILE
Xif test 2824 -ne `wc -c <'.Data/.Help/pls1b'`; then
X echo shar: \"'.Data/.Help/pls1b'\" unpacked with wrong size!
Xfi
X# end of '.Data/.Help/pls1b'
Xfi
Xif test -f '.Data/.Help/pls1c' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'.Data/.Help/pls1c'\"
Xelse
Xecho shar: Extracting \"'.Data/.Help/pls1c'\" \(2919 characters\)
Xsed "s/^X//" >'.Data/.Help/pls1c' <<'END_OF_FILE'
XX.BG
XX.FN pls1c
XX.TL
XXUnivariate Partial Least Squares Regression
XX.DN
XXPerforms univariate partial least squares (PLS) regression of a vector on a
XXmatrix of explanatory variables using a modified version of an
XXalgorithm given in Helland (1988)
XX.CS
XXpls1c(X, y, K=min(dx[1]-1,dx[2]))
XX.RA
XX.AG X
XXMatrix of explanatory variables. Each column represents a variable and
XXeach row an observation. The columns of this matrix are assumed to have been
XXcentred. The number of rows of `X' should equal the number of observations in
XX`y'. `NA's and `Inf's are not allowed.
XX.AG y
XXVector of responses. y is assumed to have been centred.
XX`NA's and `Inf's are not allowed.
XX.OA
XX.AG K
XXNumber of PLS factors to fit in the PLS regression. This must
XXbe less than or equal to the rank of `X'.
XX.RT
XXa vector of regression coefficients
XX.DT
XXUnivariate Partial Least Squares Regression is an example of a
XXregularised regression method. It creates a lower dimensional
XXrepresentation of the original explanatory variables and uses this
XXrepresentation in an ordinary least squares regression of the response
XXvariables. (cf. Principal Components Regression). Unlike Principal
XXComponents Regression, PLS regression chooses the lower dimensional
XXrepresentation of the original explanatory variables with reference to
XXthe response variable `y'.
XX.SH REFERENCES
XXDenham, M. C. (1992).
XXImplementing partial least squares.
XXTechnical Report. Liverpool University
XX.sp
XXHelland, I. S. (1988).
XXOn the Structure of partial least squares regression,
XXCommunications in Statistics, 17, pp. 581-607
XX.sp
XXMartens, H. and Naes, T. (1989).
XXMultivariate Calibration.
XXWiley, New York.
XX.SA
XXpls1a,pls1b,svdpls1a,svdpls1b,svdpls1c
XX.EX
XX# The function is currently defined as
XXfunction(X, y, K=min(dx[1]-1,dx[2]))
XX{
XX# Copyright Mike Denham, October 1994.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Modified Helland Algorithm (Helland 1988 + Denham 1994)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX tol <- 1e-10
XX X <- as.matrix(X)
XX dx <- dim(X)
XX W <- matrix(0, dx[2], K)
XX XW <- matrix(0, dx[1], K)
XX s <- crossprod(X, y)
XX W[, 1] <- s
XX XW[, 1] <- X %*% s
XX QR <- qr(XW[, 1], tol = tol)
XX r <- qr.resid(QR, y)
XX if(K > 1) {
XX for(i in 2:K) {
XX w <- crossprod(X, r)
XX W[, i] <- w
XX XW[, i] <- X %*% w
XX QR <- qr(XW[, 1:i], tol = tol)
XX r <- qr.resid(QR, y)
XX }
XX }
XX W %*% qr.coef(QR, y)
XX}
XX.KW regression
XX.WR
XEND_OF_FILE
Xif test 2919 -ne `wc -c <'.Data/.Help/pls1c'`; then
X echo shar: \"'.Data/.Help/pls1c'\" unpacked with wrong size!
Xfi
X# end of '.Data/.Help/pls1c'
Xfi
Xif test -f '.Data/.Help/svdpls1a' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'.Data/.Help/svdpls1a'\"
Xelse
Xecho shar: Extracting \"'.Data/.Help/svdpls1a'\" \(2475 characters\)
Xsed "s/^X//" >'.Data/.Help/svdpls1a' <<'END_OF_FILE'
XX.BG
XX.FN svdpls1a
XX.TL
XXUnivariate Partial Least Squares Regression
XX.DN
XXPerforms univariate partial least squares (PLS) regression of a vector on a
XXmatrix of explanatory variables using the Orthogonal Scores Algorithm.
XX.CS
XXsvdpls1a(X, y, K=r)
XX.RA
XX.AG X
XXMatrix of explanatory variables. Each column represents a variable and
XXeach row an observation. The columns of this matrix are assumed to have been
XXcentred. The number of rows of `X' should equal the number of observations in
XX`y'. `NA's and `Inf's are not allowed.
XX.AG y
XXVector of responses. y is assumed to have been centred.
XX`NA's and `Inf's are not allowed.
XX.OA
XX.AG K
XXNumber of PLS factors to fit in the PLS regression. This must
XXbe less than or equal to the rank of `X'.
XX.RT
XXa vector of regression coefficients
XX.DT
XXUnivariate Partial Least Squares Regression is an example of a
XXregularised regression method. It creates a lower dimensional
XXrepresentation of the original explanatory variables and uses this
XXrepresentation in an ordinary least squares regression of the response
XXvariables. (cf. Principal Components Regression). Unlike Principal
XXComponents Regression, PLS regression chooses the lower dimensional
XXrepresentation of the original explanatory variables with reference to
XXthe response variable `y'.
XX.SH REFERENCES
XXDenham, M. C. (1994).
XXImplementing partial least squares.
XXStatistics and Computing (to appear)
XX.sp
XXHelland, I. S. (1988).
XXOn the Structure of partial least squares regression,
XXCommunications in Statistics, 17, pp. 581-607
XX.sp
XXMartens, H. and Naes, T. (1989).
XXMultivariate Calibration.
XXWiley, New York.
XX.SA
XXpls1a,pls1b,pls1c,svdpls1b,svdpls1c
XX.EX
XX# The function is currently defined as
XXfunction(X, y, K = r)
XX{
XX# Copyright Mike Denham, October 1993.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Scores Algorithm for PLS (Martens and Naes, pp. 121--123)
XX# using Singular Value Decomposition. (Uses a replacement version of svd
XX# which is more efficient when the number of columns is large relative to
XX# the number of rows.)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX X <- as.matrix(X)
XX r <- min(dim(X) - c(1, 0))
XX X <- my.svd(X)
XX X$v[, 1:r] %*% pls1a(diag(X$d[1:r]), crossprod(X$u[, 1:r], y), K)
XX}
XX.KW regression
XX.WR
XEND_OF_FILE
Xif test 2475 -ne `wc -c <'.Data/.Help/svdpls1a'`; then
X echo shar: \"'.Data/.Help/svdpls1a'\" unpacked with wrong size!
Xfi
X# end of '.Data/.Help/svdpls1a'
Xfi
Xif test -f '.Data/.Help/svdpls1b' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'.Data/.Help/svdpls1b'\"
Xelse
Xecho shar: Extracting \"'.Data/.Help/svdpls1b'\" \(2396 characters\)
Xsed "s/^X//" >'.Data/.Help/svdpls1b' <<'END_OF_FILE'
XX.BG
XX.FN svdpls1b
XX.TL
XXUnivariate Partial Least Squares Regression
XX.DN
XXPerforms univariate partial least squares (PLS) regression of a vector on a
XXmatrix of explanatory variables using the Orthogonal Loadings Algorithm.
XX.CS
XXsvdpls1b(X, y, K=r)
XX.RA
XX.AG X
XXMatrix of explanatory variables. Each column represents a variable and
XXeach row an observation. The columns of this matrix are assumed to have been
XXcentred. The number of rows of `X' should equal the number of observations in
XX`y'. `NA's and `Inf's are not allowed.
XX.AG y
XXVector of responses. y is assumed to have been centred.
XX`NA's and `Inf's are not allowed.
XX.OA
XX.AG K
XXNumber of PLS factors to fit in the PLS regression. This must
XXbe less than or equal to the rank of `X'.
XX.RT
XXa vector of regression coefficients
XX.DT
XXUnivariate Partial Least Squares Regression is an example of a
XXregularised regression method. It creates a lower dimensional
XXrepresentation of the original explanatory variables and uses this
XXrepresentation in an ordinary least squares regression of the response
XXvariables. (cf. Principal Components Regression). Unlike Principal
XXComponents Regression, PLS regression chooses the lower dimensional
XXrepresentation of the original explanatory variables with reference to
XXthe response variable `y'.
XX.SH REFERENCES
XXDenham, M. C. (1994).
XXImplementing partial least squares.
XXStatistics and Computing (to appear)
XX.sp
XXHelland, I. S. (1988).
XXOn the Structure of partial least squares regression,
XXCommunications in Statistics, 17, pp. 581-607
XX.sp
XXMartens, H. and Naes, T. (1989).
XXMultivariate Calibration.
XXWiley, New York.
XX.SA
XXpls1a,pls1b,pls1c,svdpls1a,svdpls1c
XX.EX
XX# The function is currently defined as
XXfunction(X, y, K = r)
XX{
XX# Copyright Mike Denham, October 1993.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Orthogonal Loadings Algorithm for PLS (Martens and Naes, pp. 123--125)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX X <- as.matrix(X)
XX r <- min(dim(X) - c(1, 0))
XX X <- svd(X)
XX X$v[, 1:r] %*% pls1b(diag(X$d[1:r]), crossprod(X$u[, 1:r], y), K)
XX}
XX.KW regression
XX.WR
XEND_OF_FILE
Xif test 2396 -ne `wc -c <'.Data/.Help/svdpls1b'`; then
X echo shar: \"'.Data/.Help/svdpls1b'\" unpacked with wrong size!
Xfi
X# end of '.Data/.Help/svdpls1b'
Xfi
Xif test -f '.Data/.Help/svdpls1c' -a "${1}" != "-c" ; then
X echo shar: Will not clobber existing file \"'.Data/.Help/svdpls1c'\"
Xelse
Xecho shar: Extracting \"'.Data/.Help/svdpls1c'\" \(2410 characters\)
Xsed "s/^X//" >'.Data/.Help/svdpls1c' <<'END_OF_FILE'
XX.BG
XX.FN svdpls1c
XX.TL
XXUnivariate Partial Least Squares Regression
XX.DN
XXPerforms univariate partial least squares (PLS) regression of a vector on a
XXmatrix of explanatory variables using a modified version of an
XXalgorithm given in Helland (1988)
XX.CS
XXsvdpls1c(X, y, K=r)
XX.RA
XX.AG X
XXMatrix of explanatory variables. Each column represents a variable and
XXeach row an observation. The columns of this matrix are assumed to have been
XXcentred. The number of rows of `X' should equal the number of observations in
XX`y'. `NA's and `Inf's are not allowed.
XX.AG y
XXVector of responses. y is assumed to have been centred.
XX`NA's and `Inf's are not allowed.
XX.OA
XX.AG K
XXNumber of PLS factors to fit in the PLS regression. This must
XXbe less than or equal to the rank of `X'.
XX.RT
XXa vector of regression coefficients
XX.DT
XXUnivariate Partial Least Squares Regression is an example of a
XXregularised regression method. It creates a lower dimensional
XXrepresentation of the original explanatory variables and uses this
XXrepresentation in an ordinary least squares regression of the response
XXvariables. (cf. Principal Components Regression). Unlike Principal
XXComponents Regression, PLS regression chooses the lower dimensional
XXrepresentation of the original explanatory variables with reference to
XXthe response variable `y'.
XX.SH REFERENCES
XXDenham, M. C. (1992).
XXImplementing partial least squares.
XXTechnical Report. Liverpool University
XX.sp
XXHelland, I. S. (1988).
XXOn the Structure of partial least squares regression,
XXCommunications in Statistics, 17, pp. 581-607
XX.sp
XXMartens, H. and Naes, T. (1989).
XXMultivariate Calibration.
XXWiley, New York.
XX.SA
XXpls1a,pls1b,svdpls1a,svdpls1b,svdpls1c
XX.EX
XX# The function is currently defined as
XXfunction(X, y, K = r)
XX{
XX# Copyright Mike Denham, October 1994.
XX# Comments and Complaints to: snsdenhm@reading.ac.uk
XX#
XX# Modified Helland Algorithm (Helland 1988 + Denham 1994)
XX#
XX# X: A matrix which is assumed to have been centred so that columns
XX# sum to zero.
XX#
XX# y: A vector assumed to sum to zero.
XX#
XX# K: The number of PLS factors in the model which must be less than or
XX# equal to the rank of X.
XX#
XX# Returned Value is the vector of PLS regression coefficients
XX#
XX# tol is set as the tolerance for the QR decomposition in determining
XX# rank deficiency
XX#
XX X <- as.matrix(X)
XX r <- min(dim(X) - c(1, 0))
XX X <- svd(X)
XX X$v[, 1:r] %*% pls1c(diag(X$d[1:r]), crossprod(X$u[, 1:r], y), K)
XX}
XX.KW regression
XX.WR
XEND_OF_FILE
Xif test 2410 -ne `wc -c <'.Data/.Help/svdpls1c'`; then
X echo shar: \"'.Data/.Help/svdpls1c'\" unpacked with wrong size!
Xfi
X# end of '.Data/.Help/svdpls1c'
Xfi
Xecho shar: End of shell archive.
Xexit 0
END_OF_FILE
# Integrity check: the archive recorded 'splus.sh' as 28696 bytes at pack
# time; warn (but do not abort) if the extracted copy differs. The command
# substitution is deliberately unquoted so any whitespace around the byte
# count from `wc -c` is stripped by word splitting.
if test 28696 -ne $(wc -c < 'splus.sh'); then
  printf '%s\n' "shar: \"'splus.sh'\" unpacked with wrong size!"
fi
# end of 'splus.sh'
fi
# All archive members have been processed: announce completion and
# terminate the unpack script with a success status.
printf '%s\n' 'shar: End of shell archive.'
exit 0