The Wald test only requires estimating the unrestricted model.
The Wald F-test requires estimating two models: the restricted one and the unrestricted one.
Below is the waldf function from the MATLAB jplv7 toolbox; it lives in the \jplv7\regress directory.
function [fstat, fprb] = waldf(resultr,resultu)
% PURPOSE: computes Wald F-test for two regressions
%---------------------------------------------------
% USAGE: [fstat fprob] = waldf(resultr,resultu)
% or: waldf(resultr,resultu), which prints to the screen
% Where: resultr = results structure from ols() restricted regression
% resultu = results structure from ols() unrestricted regression
%---------------------------------------------------
% RETURNS: fstat = {(essr - essu)/#restrict}/{essu/(nobs-nvar)}
% fprb = marginal probability for fstat
% NOTE: large fstat => reject the restrictions as inconsistent
% with the data
%---------------------------------------------------
% SEE ALSO: ols()
%---------------------------------------------------
% written by:
% James P. LeSage, Dept of Economics
% University of Toledo
% 2801 W. Bancroft St,
% Toledo, OH 43606
%
% jlesage@spatial-econometrics.com
pflag = 0;
if nargout == 0
    pflag = 1; % no output arguments requested, so print results to the screen
end;
if nargin ~= 2 % flag incorrect arguments
    error('waldf: Wrong # of input arguments');
elseif isstruct(resultu) == 0
    error('waldf requires an ols results structure as input');
elseif isstruct(resultr) == 0
    error('waldf requires an ols results structure as input');
end;
% get nobs, nvar from unrestricted and restricted regressions
nu = resultu.nobs; nr = resultr.nobs;
ku = resultu.nvar; kr = resultr.nvar;
if nu ~= nr
    error('waldf: the # of obs in the results structures are different');
end;
if (ku - kr) < 0 % flag reversed input arguments
    error('waldf: negative dof, check for reversed input arguments');
end;
% recover residual sum of squares from .sige field of the result structure
epeu = resultu.sige*(nu-ku); eper = resultr.sige*(nr-kr);
numr = ku - kr; % find # of restrictions
ddof = nu-ku; % find denominator dof
fstat1 = (eper - epeu)/numr; % numerator
fstat2 = epeu/(nu-ku); % denominator
fstat = fstat1/fstat2; fprb = fdis_prb(fstat,numr,ddof);
if pflag == 1
    fprintf(1,'Wald F-statistic = %16.8f \n',fstat);
    fprintf(1,'probability = %16.4f \n',fprb);
    fprintf(1,'num,denom dof = %4d,%4d\n',numr,(nu-ku));
end;
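For reference, here is a minimal usage sketch (hypothetical data, not from the toolbox documentation): fit the unrestricted model with three regressors, fit the restricted model that drops the third regressor (imposing beta3 = 0), then pass both results structures to waldf. It assumes the jplv7 regress directory (ols, waldf, fdis_prb, tdis_inv) is on the MATLAB path.
n = 100;                          % hypothetical sample size
x1 = ones(n,1);                   % intercept
x2 = randn(n,1);
x3 = randn(n,1);
y  = 1 + 0.5*x2 + 0.1*x3 + randn(n,1);
resultu = ols(y,[x1 x2 x3]);      % unrestricted regression
resultr = ols(y,[x1 x2]);         % restricted regression: imposes beta3 = 0
[fstat, fprb] = waldf(resultr,resultu);   % 1 restriction, n-3 denominator dof
% or: waldf(resultr,resultu) with no output arguments to print to the screen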
The ols function it calls is in the same directory. Its code is as follows:
function results=ols(y,x)
% PURPOSE: least-squares regression
%---------------------------------------------------
% USAGE: results = ols(y,x)
% where: y = dependent variable vector (nobs x 1)
% x = independent variables matrix (nobs x nvar)
%---------------------------------------------------
% RETURNS: a structure
% results.meth = 'ols'
% results.beta = bhat (nvar x 1)
% results.tstat = t-stats (nvar x 1)
% results.bstd = std deviations for bhat (nvar x 1)
% results.yhat = yhat (nobs x 1)
% results.resid = residuals (nobs x 1)
% results.sige = e'*e/(n-k) scalar
% results.rsqr = rsquared scalar
% results.rbar = rbar-squared scalar
% results.dw = Durbin-Watson Statistic
% results.nobs = nobs
% results.nvar = nvars
% results.y = y data vector (nobs x 1)
% results.bint = (nvar x 2) vector with 95% confidence intervals on beta
%---------------------------------------------------
% SEE ALSO: prt(results), plt(results)
%---------------------------------------------------
% written by:
% James P. LeSage, Dept of Economics
% University of Toledo
% 2801 W. Bancroft St,
% Toledo, OH 43606
%
% jlesage@spatial-econometrics.com
%
% Barry Dillon (CICG Equity)
% added the 95% confidence intervals on bhat
if (nargin ~= 2); error('Wrong # of arguments to ols');
else
    [nobs nvar] = size(x); [nobs2 junk] = size(y);
    if (nobs ~= nobs2); error('x and y must have same # obs in ols');
    end;
end;
results.meth = 'ols';
results.y = y;
results.nobs = nobs;
results.nvar = nvar;
if nobs < 10000
    [q r] = qr(x,0);
    xpxi = (r'*r)\eye(nvar);
else % use Cholesky for very large problems
    xpxi = (x'*x)\eye(nvar);
end;
results.beta = xpxi*(x'*y);
results.yhat = x*results.beta;
results.resid = y - results.yhat;
sigu = results.resid'*results.resid;
results.sige = sigu/(nobs-nvar);
tmp = (results.sige)*(diag(xpxi));
sigb=sqrt(tmp);
results.bstd = sigb;
tcrit=-tdis_inv(.025,nobs);
results.bint=[results.beta-tcrit.*sigb, results.beta+tcrit.*sigb];
results.tstat = results.beta./(sqrt(tmp));
ym = y - mean(y);
rsqr1 = sigu;
rsqr2 = ym'*ym;
results.rsqr = 1.0 - rsqr1/rsqr2; % r-squared
rsqr1 = rsqr1/(nobs-nvar);
rsqr2 = rsqr2/(nobs-1.0);
if rsqr2 ~= 0
    results.rbar = 1 - (rsqr1/rsqr2); % rbar-squared
else
    results.rbar = results.rsqr;
end;
ediff = results.resid(2:nobs) - results.resid(1:nobs-1);
results.dw = (ediff'*ediff)/sigu; % durbin-watson
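Because ols stores e'*e/(n-k) in the .sige field, the F-statistic returned by waldf can be reproduced by hand from the two results structures. A short cross-check sketch, continuing the hypothetical example above:
essu = resultu.sige*(resultu.nobs - resultu.nvar);   % unrestricted e'*e
essr = resultr.sige*(resultr.nobs - resultr.nvar);   % restricted e'*e
q    = resultu.nvar - resultr.nvar;                  % number of restrictions
dofd = resultu.nobs - resultu.nvar;                  % denominator dof
f    = ((essr - essu)/q)/(essu/dofd);                % should match fstat from waldf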