nlls6.m (1757B)
function [u,beta,se,tstat]=nlls6(uname,beta0,X,options,lb,ub);

% nlls6 -- generic nonlinear least squares driver built on lsqnonlin.
%
%   [u,beta,se,tstat]=nlls6(uname,beta0,X,options,lb,ub);
%
% Inputs:
%   uname   -- residual function, e.g. u=y-m(x,beta).  lsqnonlin cannot
%              pass extra parameters, so the residual function is expected
%              to pick up the data X=[y x] through a global.
%   beta0   -- initial guess for the parameter vector.
%   X       -- data matrix [y x]; column 1 is the dependent variable
%              (used here only for N and the R-squared calculation).
%   options -- optimization settings; defaults to optimset('lsqnonlin').
%   lb,ub   -- optional lower/upper bounds on the parameters.
%
% Outputs:
%   u     -- residual vector at the optimum.
%   beta  -- NLLS parameter estimates.
%   se    -- conventional standard errors.
%   tstat -- beta./se.
%
% Also prints White heteroskedasticity-robust standard errors and t-stats.
%
% History: 4/17/96 (from phinlls.m in ~/rad).  nlls5 was the fminunc-based
% update of nlls.m for Matlab 5; nlls6 switches to lsqnonlin -- much faster.

% Supply defaults for any trailing arguments the caller omitted.
if nargin<4; options=[]; end;
if isempty(options); options=optimset('lsqnonlin'); end;
if nargin<5; lb=[]; end;
if nargin<6; ub=[]; end;

[beta,resnorm,u,exitflag,output,lambda,jacobian]=lsqnonlin(uname,beta0,lb,ub,options);

% Conventional standard errors from the Jacobian of the residuals.
N=size(X,1);
y=X(:,1);
k=length(beta);
M=-jacobian;                 % gradient of the fitted values w.r.t. beta
if issparse(M);
  M=full(M);
end;
sigma2=resnorm/(N-k);        % residual variance, df-corrected
mminv=inv(M'*M);
vcv=sigma2*mminv;
se=sqrt(diag(vcv));
tstat=beta./se;

% White (heteroskedasticity-robust) sandwich covariance matrix.
f=mult(M,u);
Sw=f'*f/(N-k);
robvcv=N*mminv*Sw*mminv;
robse=sqrt(diag(robvcv));
robt=beta./robse;

% R-squared: 1 - SSR/SST  (resnorm is u'*u from lsqnonlin).
ybar=1/N*(ones(1,N)*y);
rsq=1-(resnorm)/(y'*y - N*ybar^2);

fprintf('R-Squared: %7.4f\n',rsq);
fprintf('uu: %7.4f\n',resnorm);
disp ' Beta S.E. t-stat WhiteSE WhiteT';
disp '------------------------------------------------------------';
cshow(' ',[beta se tstat robse robt],'%12.6f');