## File: Size_empirical.R
##
## INPUT:  Tot1_n3142.dat
## OUTPUT: p-values of the test statistics T (Pearson-type) and T(G) (Gini-type)
##
## IMPORTANT: All p-values computed by the MAIN code are for the GINI-1 ACVF,
##            as defined by Shelef and Schechtman.
##
## REFERENCES:
## M.D. Carcea and R. Serfling,
##   A Gini autocovariance function for time series modelling,
##   J. Time Ser. Anal. 36 (2015), pp. 817--838.
## J.G. De Gooijer,
##   Testing nonlinearity of heavy-tailed time series,
##   J. Appl. Stat. (2024).
## S.I. Resnick and E. Van den Berg,
##   A test for nonlinearity of time series with infinite variance,
##   Extremes 3 (2000), pp. 145--172.
## A. Shelef and E. Schechtman,
##   A Gini-based time series analysis and test for reversibility,
##   Statist. Papers 60 (2019), pp. 687--716.
##
#################################################################
################ 14 SUPPORTING FUNCTIONS ######################

test_generate <- function(x=DATA, P_alpha=a, h=h_opt){   # Pearson, no mean (n) correction
  n    <- floor(length(x)/2.)
  xx   <- x[(n+1.):(2.*n)]
  rho1 <- acf.htsid(x[1.:n], h, 1., 1., T)$corr           # Pearson, no mean (n) correction, 1st subsample
  rho2 <- acf.htsid(x[(n+1.):(2.*n)], h, 1., 1., T)$corr  # 2nd subsample
  rd   <- (n/log(n))^(1./P_alpha) * abs(rho1 - rho2)
  rd   <- rd[1:h]
  m.P.n <- max(rd)                                        # Pearson (P), no mean (n) correction
  mm   <- m.P.n
  return(mm)
}

test_generate_G <- function(x=DATA, P_alpha=a, h=h_opt){  # Gini, no mean (n) correction
  n      <- floor(length(x)/2.)
  xx     <- x[(n+1.):(2.*n)]
  rho1   <- g_f.n.x1(x[1.:n], h+1)    # Gini (G), no mean (n) correction, 1st subsample
  rho2   <- g_f.n.x1(xx[1.:n], h+1)   # 2nd subsample; Pearson analogue: acf.htsid(x[(n+1.):(2.*n)],h,1.,1.,T)$corr
  rd.g1  <- (n/log(n))^(1./P_alpha) * abs(rho1 - rho2)
  rd.g1  <- rd.g1[1:h]
  m.g1.n <- max(rd.g1)                # Gini, no mean correction
  mm     <- m.g1.n
  return(mm)
}

test_Gaut_G <- function(x=DATA, P_alpha=a, h=h_opt){      # Gini, no mean (n) correction
  # Carcea-Serfling G-ACVF version; requires Lag_vect (not defined in this file)
  Lag = h
  nr  = length(x)
  n   <- floor(length(x)/2)
  xxx = x[1:n]
  xx  = x[(n+1):(2.*n)]
  sort.s     = sort(x)
  Gautcov0.s = Gautcov_0(sort.s)
  ar.stand_1 = xxx
  Gautcov.stand.p_1 = Gautcov_EstimF1(n, ar.stand_1, Lag, "p")
  Gautcov.stand     = c(Gautcov.stand.p_1[(Lag-1):1], Gautcov0.s)
  run.lag2  = matrix(1:Lag, byrow=TRUE, nrow=1)
  vect.part = function(x) {(Lag+1-x):(2*Lag-x)}
  index     = apply(run.lag2, 2, vect.part)
  rho_G1    = c(Gautcov.stand.p_1) / c(Gautcov0.s)
  ar.stand  = xx
  Gautcov.stand.p_2 = Gautcov_EstimF1(n, ar.stand, Lag, "p")
  run.lag2  = matrix(1:Lag, byrow=TRUE, nrow=1)
  index     = apply(run.lag2, 2, vect.part)
  rho_G2    = c(Gautcov.stand.p_2) / c(Gautcov0.s)
  rd.g1  <- (nr/log(nr))^(1./P_alpha) * abs(rho_G1 - rho_G2)   # TEST statistic
  rd.g1  <- rd.g1[1:h]
  m.g1.n <- max(rd.g1)                                         # GINI (G), no (n) mean correction
  mm     <- m.g1.n
  return(mm)
}
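## ----------------------------------------------------------------------
## Usage sketch (illustration only, not part of the original computation):
## the split-sample statistics above, evaluated on a simulated linear
## AR(1) series.  Assumes the whole file has been sourced (so acf.htsid()
## and g_f.n.x1() are available); alpha = 1.5 and h = 20 are arbitrary
## example values.
##
##   z_sim <- as.numeric(arima.sim(n = 2000, list(ar = 0.5)))
##   test_generate(x = z_sim, P_alpha = 1.5, h = 20)     # Pearson-type statistic
##   test_generate_G(x = z_sim, P_alpha = 1.5, h = 20)   # Gini-type statistic
## ----------------------------------------------------------------------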
############# PEARSON #########################
boot_NL <- function (x, val=0, M=500, b_size=10){
  boo_NL = tsboot(tseries=x, statistic=test_generate, R=M, sim="fixed", l=b_size)
  # t0  If orig.t is TRUE then t0 is the result of statistic(tseries,...),
  #     otherwise it is NULL.
  # t   The results of applying statistic to the replicate time series.
  # R   The value of R as supplied to tsboot.
  # l   The value of l used for block-based resampling.
  #     This will be NULL if block-based resampling was not used.
  rejects_95 = NULL          # bootstrapped distribution function of abs(Test-value - Test_H0)
  stats_NL    = sort(boo_NL$t)
  quant_95    = quantile(stats_NL, prob=0.95)
  quant_tot_1 = quantile(stats_NL, prob=seq(0,1,1/40))
  # quant_tot_2 = quantile(stats_NL, prob=seq(0.92,1,1/500))
  if( boo_NL$t0 > quant_95 ) {
    rejects_95 = c(rejects_95, 1)
  } else {
    rejects_95 = c(rejects_95, 0)
  }
  return(c(rejects_95, quant_tot_1))
}

################ GINI ############################
boot_NL_G <- function (x, val=0, M=500, b_size=10){
  # boo_NL = tsboot(tseries=x, statistic=test_generate_G, R=M, sim="fixed", l=b_size)
  boo_NL = tsboot(tseries=x, statistic=test_Gaut_G, R=M, sim="fixed", l=b_size)
  # t0  If orig.t is TRUE then t0 is the result of statistic(tseries,...),
  #     otherwise it is NULL.
  # t   The results of applying statistic to the replicate time series.
  # R   The value of R as supplied to tsboot.
  # l   The value of l used for block-based resampling.
  #     This will be NULL if block-based resampling was not used.
  rejects_95 = NULL          # bootstrapped distribution function of abs(Test-value - Test_H0)
  stats_NL    = sort(boo_NL$t)
  quant_95    = quantile(stats_NL, prob=0.95)
  quant_tot_G = quantile(stats_NL, prob=seq(0,1,1/40))
  # quant_tot_2 = quantile(stats_NL, prob=seq(0.92,1,1/500))
  if( boo_NL$t0 > quant_95 ) {
    rejects_95 = c(rejects_95, 1)
  } else {
    rejects_95 = c(rejects_95, 0)
  }
  return(c(rejects_95, quant_tot_G))
}

rhotest_n <- function(x, alpha=a, h=20)    # "classical" Pearson ACF, NO mean correction
{                                          # rho^{1} and rho^{2}, h = h_opt = number of lags
  n <- floor(length(x)/2.)                 # see Thm. 1, subsamples 1:n and (n+1):2n
  # if(alpha > 1) {
  #   rho1 <- acf_n(x[1:n], h, plot = FALSE)$acf[, , 1]
  #   rho2 <- acf_n(x[(n+1):(2*n)], h, plot=FALSE)$acf[, , 1]
  #   rho1 <- acf(x[1:n], h, plot=FALSE, na.action=na.pass)$acf[, , 1]           # with MEAN CORRECTION
  #   rho2 <- acf(x[(n+1):(2*n)], h, plot=FALSE, na.action=na.pass)$acf[, , 1]
  # } else {
  rho1 <- acf.htsid(x[1.:n], h, 1., 1., T)$corr            # no mean (n) correction, 1st subsample
  rho2 <- acf.htsid(x[(n+1.):(2.*n)], h, 1., 1., T)$corr   # 2nd subsample
  # }
  rd <- (n/log(n))^(1./alpha) * abs(rho1-rho2)   ## alpha = P_alpha
  m  <- max(rd)                                  # test statistic
  return(m)
}
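## ----------------------------------------------------------------------
## Usage sketch (illustration only, assumed example values): one block-
## bootstrap run with the Pearson-type statistic.  Assumes the whole file
## has been sourced and that the globals a and h_opt exist (they are set
## in MAIN), because test_generate() reads them through its defaults.
##
##   library(boot)
##   a <- 1.5; h_opt <- 20
##   z_sim <- as.numeric(arima.sim(n = 2000, list(ar = 0.5)))
##   out   <- boot_NL(z_sim, M = 100, b_size = 10)
##   out[1]    # 1 = original statistic exceeds the bootstrap 95% quantile, 0 = it does not
##   out[-1]   # bootstrap quantiles of the statistic at probs 0, 1/40, ..., 1
## ----------------------------------------------------------------------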
rhotest.g1.n <- function(x, alpha=a, h=h_opt)   # Gini-1, no mean (n) correction
{                                               # call to function: g_f.n.x1
  n  <- floor(length(x)/2.)
  xx <- x[(n+1.):(2.*n)]
  if(alpha > 1) {
    rho1.g1 <- g_f.n.x1(x[1:n], h+1)
    rho1.g1 <- rho1.g1[1:h+1]
    rho2.g1 <- g_f.n.x1(xx[1:n], h+1)
    rho2.g1 <- rho2.g1[1:h+1]
    rho_g1  <- g_f.n.x1(x[1:length(x)], h+1)
    rho_g1  <- rho_g1[1:h+1]
  } else {
    rho1.g1 <- g_f.n.x1(x[1:n], h+1)
    rho1.g1 <- rho1.g1[1:h+1]
    rho2.g1 <- g_f.n.x1(xx[1:n], h)
    rho2.g1 <- rho2.g1[1:h+1]
    rho_g1  <- g_f.n.x1(x[1:length(x)], h+1)
    rho_g1  <- rho_g1[1:h+1]
  }
  rd.g1  <- (n/log(n))^(1./alpha) * abs(rho1.g1-rho2.g1)
  rd.g1  <- rd.g1[1:h]
  m.g1.n <- max(rd.g1)
  return(m.g1.n)                                # test statistic
}

g_f.n.x1 <- function (x, lag.max=NULL){         # Gini-1, no mean (n) correction
  T <- length(x)                                # call to function: for_sum_acf.n
  if (is.null(lag.max)) lag.max <- floor(10 * (log10(T)))
  lag.max <- min(lag.max, T - 1)
  if (lag.max < 0) stop("'lag.max' must be at least 1")
  g_x1.n <- rep(0, lag.max)
  g_x2.n <- rep(0, lag.max)
  rank_x <- rank(x[1:T])/length(x[1:T])
  sum0   <- sum(x*rank_x)
  gacf_x_s <- apply(as.matrix(1:lag.max), 1, for_sum_acf.n, x, T, lag.max)/sum0
  sum1     <- apply(as.matrix(1:lag.max), 1, for_sum_acf.n, x, T, lag.max)
  g_x1.n <- c(1, gacf_x_s[1,])
  g_x2.n <- c(1, gacf_x_s[2,])
  return(g_x1.n)
}

########## function GACF.f (f = function) ####################
g_f.x1 <- function (x, lag.max=NULL){           # Gini-1, mean correction
  T <- length(x)                                # call to function: for_sum_acf
  if (is.null(lag.max)) lag.max <- floor(10 * (log10(T)))
  lag.max <- min(lag.max, T - 1)
  if (lag.max < 0) stop("'lag.max' must be at least 1")
  g_x1 <- rep(0, lag.max)
  g_x2 <- rep(0, lag.max)
  T1     <- length(x)
  mean_x <- mean(x)
  rank_x <- rank(x[1:T1])/length(x[1:T1])
  sum0   <- sum((x-mean_x)*(rank_x-0.5))
  gacf_x_s <- apply(as.matrix(1:lag.max), 1, for_sum_acf, x, T1, mean_x, lag.max)/sum0
  sum1     <- apply(as.matrix(1:lag.max), 1, for_sum_acf, x, T1, mean_x, lag.max)
  g_x1 <- c(1, gacf_x_s[1,])
  g_x2 <- c(1, gacf_x_s[2,])
  return(g_x1)                                  # used in: sdisp.g1 + rhotest.g1
}

for_sum_acf <- function(i, x, T1, mean_x, lag.max){   # with mean correction
  x2      <- x[(i+1):T1]                              # called BY function: g_f.x1
  rank_x1 <- rank(x[1:(T1-i)])/(T1-i)
  sum1    <- sum((x2-mean_x)*(rank_x1-0.5))
  x1      <- x[1:(T1-i)]
  rank_x2 <- rank(x[(i+1):T1])/(T1-i)
  sum2    <- sum((x1-mean_x)*(rank_x2-0.5))
  rank_x3 <- rank(x[i:T1-i])/(T1-i)
  sum3    <- 2* sum( (2*i-(T1-lag.max)-1)*(rank_x3-0.5) )/((T1-lag.max)*(T1-lag.max-1))
  rank_cs <- rank(x[i:T1-i])/(T1-i)
  sum4    <- 2* sum( (2*i-(T1-lag.max))*(rank_cs-0.5) )/((T1-lag.max)*(T1-lag.max))
  return(c(sum1, sum2))
}

for_sum_acf.n <- function(i, x, T1, lag.max){         # no mean correction
  x2      <- x[(i+1):T1]                              # called BY function: g_f.n.x1
  rank_x1 <- rank(x[1:(T1-i)])/(T1-i)
  sum1.1  <- sum(x2*rank_x1)
  x1      <- x[1:(T1-i)]
  rank_x2 <- rank(x[(i+1):T1])/(T1-i)
  sum2.2  <- sum(x1*rank_x2)
  return(c(sum1.1, sum2.2))
}

acf.htsid <- function(x, p, lower, upper, plot=FALSE)  # ht = heavy tailed
{                    # called by: test_generate, rhotest_n, rhotest, sdisp
  len  <- length(x)
  den  <- sum(x*x)
  corr <- matrix(0., p, 1)
  for(j in 1.:p) {
    corr[j] <- sum(x[1.:(len-j)]*x[(1.+j):len])/den
  }
  l <- max(abs(corr))
  return(list(corr=corr, maxcorr=l))
}
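## ----------------------------------------------------------------------
## Usage sketch (illustration only): compare the first few Gini-1
## autocorrelations (no mean correction) with the heavy-tailed Pearson
## sample ACF on the same simulated series.
##
##   z_sim <- as.numeric(arima.sim(n = 1000, list(ar = 0.5)))
##   g_f.n.x1(z_sim, lag.max = 5)            # Gini-1 ACF: lag 0 (= 1) and lags 1,...,5
##   acf.htsid(z_sim, 5, 1, 1, FALSE)$corr   # Pearson ACF (no mean correction), lags 1,...,5
## ----------------------------------------------------------------------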
sdisp <- function(x, h, alpha)          # simulation distribution function
{                                       # "classical ACF", T = mean correction
  n <- length(x)                        # call to functions: acf + acf.htsid (F)
  if(alpha < 4./3.){
    rn <- h + floor((n/2.)^0.499)
  } else {
    exp <- (2./alpha)-1.
    rn  <- h + floor((n/2.)^exp)
  }
  if(alpha > 1) {
    rho <- c(1, acf(x, rn+h, plot = FALSE)$acf[, , 1])
  } else {
    rho <- c(1., acf.htsid(x, rn+h, 0., 1., F)$corr)
  }
  if(h > 1.)
    {rho.nn <- c(rev(rho[2.:h]), rho)}
  sapply(1.:h, cdisp, rho.nn, rn, h, alpha)
}

cdisp <- function(l, rho, rn, h, alpha)   # h = lag
{
  y <- abs(rho[(h+l+1.):(h+l+rn)]+rho[(1.+(h-l)):(rn+(h-l))]-2.*rho[(h+1.):(h+rn)]*rho[h])^(alpha)
  return(y)
}

################################################################
################# MAIN #########################################
library(bootstrap)
library(boot)

### DATA INPUT
x  = Tot1_n3142        # Ethernet-Traffic-n3142 data (1 sec); assumed to be loaded in the workspace
xx = ts(x, freq=1)
z  = as.numeric(xx)
nr = length(z)         # sample size
h_opt = 20             # maximum lag h (Note: h_opt = m in paper): 5, 10, 20  CHANGE
nrep  = 10000          # no. of simulated limit vectors (\hat{Y}_{1},...,\hat{Y}_{h_opt})
a     = 2.1            # a = alpha in paper: 1.1, 1.5, 2.1  CHANGE
rn.2  = h_opt + floor((nr/2)^0.499)   # r(n) in paper
p     = rn.2 + h_opt

################### Computation part ####################################
## Carcea-Serfling GINI (G) ACF; R = 500 = M in paper; NL = nonlinear
## boo_NL = tsboot(tseries=z, statistic=test_Gaut_G, R=500, sim="fixed", l=10)
## Replace the Carcea-Serfling GINI ACF by the Shelef-Schechtman GINI ACF
boo_NL_rho = tsboot(tseries=z, statistic=rhotest_n, R=500, sim="fixed", l=10)
pwr_P <- boo_NL_rho$t
m_n_P <- boo_NL_rho$t0                        # rhotest_n(x, alpha, h_opt), NO mean correction
                                              # P = Pearson-type
p_val_P <- length(pwr_P[pwr_P > m_n_P])/500   # p-value, "classical ACF"

boo_NL_G <- tsboot(tseries=z, statistic=rhotest.g1.n, R=500, sim="fixed", l=10)
pwr_G <- boo_NL_G$t
m_n_G <- boo_NL_G$t0                          # rhotest.g1.n(x, alpha, h_opt), NO mean correction, Gini-type
p_val_G <- length(pwr_G[pwr_G > m_n_G])/500   # p-value

cat("par (alpha) = ", a, "\n\n")
cat("h_opt (= m ) = ", h_opt, "\n\n")
p_val_P_G = cbind(p_val_P, p_val_G)
print("p-values of the test statistics T (Pearson-type) and T(G) (Gini-type)")
p_val_P_G
## OUTPUT (for instance): p_val_P and p_val_G: 0.266  0.032
## (see the p-values in Table 4 of the J. Appl. Stat. paper, 2024)
################ END MAIN #############################
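## ----------------------------------------------------------------------
## Sanity-check sketch (illustration only, not part of the original run):
## the same bootstrap p-value computation as in MAIN, applied to a
## simulated linear AR(1) series in place of the Ethernet-traffic data.
##
##   z_sim <- as.numeric(arima.sim(n = 3000, list(ar = 0.5)))
##   a <- 2.1; h_opt <- 20
##   boo_sim     <- tsboot(tseries=z_sim, statistic=rhotest_n, R=500, sim="fixed", l=10)
##   p_val_P_sim <- length(boo_sim$t[boo_sim$t > boo_sim$t0])/500
##   p_val_P_sim
## ----------------------------------------------------------------------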
############################################################
############### IMPORTANT NOTE: ####################################
## The following 18 functions are NOT used by the MAIN algorithm.
## However, in addition to the above 14 supporting functions, some of
## these functions may be used for computing GINI-2 test results.

Concomit = function (first, second) {
  # Coded by: Marcel D. Carcea
  # Reference: Contributions to Time Series Modeling under
  #            First Order Moment Assumptions, Appendix B,
  #            PhD thesis, The University of Texas at Dallas, 2014
  #
  # Computes the concomitants for a pair of vectors.
  # The concomitants are needed in computing Gini positive and negative ACVFs.
  # Input:  first    = [T * 1] vector of X_{i} observations
  #         second   = [T * 1] vector of X_{i+1} observations
  # Output: concomit = [T * 1] vector of concomitants
  #
  # Step 1:
  common = c(second, first)
  # Step 2: matrix of (X_{i+1}, X_{i}) pairs
  pair = matrix(common, ncol=2)
  # Step 3: orders (ascending by default) the matrix
  ord = order(pair[,2], pair[,1])
  # 1st-component value that is concomitant to the t-th ordered 2nd-component value
  concomit = pair[,1][ord]
  # Step 4:
  return(concomit)
}

Gautcov_0 = function(sort) {
  # Coded by: Marcel D. Carcea
  # Reference: Contributions to Time Series Modeling under
  #            First Order Moment Assumptions, Appendix B,
  #            PhD thesis, The University of Texas at Dallas, 2014
  #
  # Computes gamma^{(G)}(0)_{T}, the G-ACVF at lag 0.
  # Input:  sort      = [T * 1] vector of ordered observations.
  # Output: Gautcov_0 = the desired G-ACVF(0) value.
  #
  # Step 1: create the vector "2t-T" for t = 1,...,T
  T = length(sort)
  t = 1:T
  multi.o = 2*t-T
  multi.o = as.matrix(multi.o)
  # Step 2: multiply the vector (2t-T) by the vector "sort" and
  #         multiply by 2/T^2
  Gautcov_0 = (2/(T)^2)*t(multi.o)%*%as.matrix(sort)
  return(Gautcov_0)
}

Gautcov_Negative = function (N, sort, concomit) {
  # Coded by: Marcel D. Carcea
  # Reference: Contributions to Time Series Modeling under
  #            First Order Moment Assumptions, Appendix B,
  #            PhD thesis, The University of Texas at Dallas, 2014
  #
  # Computes gamma^{(G)}(k)_{T}, the negative G-ACVF, for lag k >= 1.
  # Input:  N         = sample size.
  #         sort      = [T * 1] vector of ordered values.
  #         concomit  = [T * 1] vector of concomitants.
  # Output: Gautcov_N = negative G-ACVF at lag k
  #
  # Step 1:
  diff = N - length(sort)
  Fn = ecdf(concomit)
  empirical = Fn(concomit)
  # Step 2:
  multi.c = (2*empirical - 1)
  multi.c = as.matrix(multi.c)
  # Step 3: multiply the vector in Step 2 by the vector of ordered values
  #         and multiply by 2/(T-k)
  Gautcov_N = (2/(N-diff))*(t(multi.c)%*%as.matrix(sort))
  return(Gautcov_N)
}

Gautcov_Positive = function (N, sort, concomit) {
  # Coded by: Marcel D. Carcea
  # Reference: Contributions to Time Series Modeling under
  #            First Order Moment Assumptions, Appendix B,
  #            PhD thesis, The University of Texas at Dallas, 2014
  #
  # Computes gamma^{(G)}(k)_{T}, the positive G-ACVF, for lag k >= 1.
  # Input:  N         = sample size.
  #         sort      = [T * 1] vector of ordered values.
  #         concomit  = [T * 1] vector of concomitants.
  # Output: Gautcov_P = positive G-ACVF at lag k
  #
  # Step 1:
  diff = N - length(sort)
  Fn = ecdf(sort)
  empirical = Fn(sort)
  # Step 2:
  multi.c = (2*empirical - 1)
  multi.c = as.matrix(multi.c)
  # Step 3: multiply the vector in Step 2 by the vector of concomitants
  #         and multiply by 2/(T-k)
  Gautcov_P = (2/(N-diff))*(t(multi.c)%*%as.matrix(concomit))
  return(Gautcov_P)
}

sdisp.g1 <- function(x, h, alpha)       # Gini-1 ACF, MEAN CORRECTION, h = lag
{                                       # call to functions: g_f.x1 + g_f.n.x1
  n <- length(x)
  if(alpha < 4./3.) {
    rn.2 <- h + floor((n/2.)^0.499)
  } else {
    rn.2 <- h + floor((n/2.)^(2./alpha - 1.))
  }
  if(alpha > 1) {
    rho.g1 <- g_f.x1(x, (rn.2+h))       # G1-ACF, mean correction
  } else {
    rho.g1 <- g_f.n.x1(x, (rn.2+h))     # G1-ACF, no mean correction
  }
  if(h > 1.)
    {rho.g1.nn <- c(rev(rho.g1[2.:h]), rho.g1)}
  sapply(1.:h, cdisp.g1, rho.g1.nn, rn.2, h, alpha)
}

sdisp.g2 <- function(x, h, alpha)       # Gini-2 ACF, MEAN CORRECTION
{                                       # call to functions: g_f.x2 + g_f.n.x2 (not defined in this file)
  n <- length(x)
  if(alpha < 4./3.) {
    rn.2 <- h + floor((n/2.)^0.499)
  } else {
    rn.2 <- h + floor((n/2.)^(2./alpha - 1.))
  }
  if(alpha > 1) {
    rho.g2 <- g_f.x2(x, (rn.2+h))       # G2-ACF, mean correction
  } else {
    rho.g2 <- g_f.n.x2(x, (rn.2+h))     # G2-ACF, no mean correction
  }
  if(h > 1.)
    {rho.g2.nn <- c(rev(rho.g2[2.:h]), rho.g2)}
  sapply(1.:h, cdisp.g2, rho.g2.nn, rn.2, h, alpha)
}

sdisp.g1.n <- function(x, h, alpha)     # Gini-1, NO MEAN CORRECTION, h = lag
{                                       # call to: g_f.n.x1
  n <- length(x)
  if(alpha < 4./3.) {
    rn.2 <- h + floor((n/2.)^0.499)
  } else {
    rn.2 <- h + floor((n/2.)^(2./alpha - 1.))
  }
  if(alpha > 1) {
    rho.g1 <- g_f.n.x1(x, (rn.2+h))     # G1-ACF, NO mean correction
  } else {
    rho.g1 <- g_f.n.x1(x, (rn.2+h))     # G1-ACF, no mean correction
  }
  if(h > 1.)
    {rho.g1.nn <- c(rev(rho.g1[2.:h]), rho.g1)}
  sapply(1.:h, cdisp.g1, rho.g1.nn, rn.2, h, alpha)
}
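## ----------------------------------------------------------------------
## Usage sketch (illustration only): a lag-1 positive G-ACVF computed
## directly from Concomit() and Gautcov_Positive(), building the lagged
## pair vectors by hand because Lag_vect() (used in Gautcov_EstimF1 below)
## is not defined in this file.  Assumes the pairing (X_t, X_{t+1}).
##
##   x_sim  <- as.numeric(arima.sim(n = 500, list(ar = 0.5)))
##   Tn     <- length(x_sim)
##   first  <- x_sim[1:(Tn-1)]                   # X_t
##   second <- x_sim[2:Tn]                       # X_{t+1}
##   conc   <- Concomit(first, second)           # X_{t+1} values ordered by X_t
##   Gautcov_Positive(Tn, sort(first), conc)     # positive G-ACVF at lag 1
##   Gautcov_0(sort(x_sim))                      # G-ACVF at lag 0 (the normalizer in test_Gaut_G)
## ----------------------------------------------------------------------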
sdisp.g2.n <- function(x, h, alpha)     # Gini-2, NO MEAN CORRECTION, h = lag
{                                       # call to function: g_f.n.x2 (not defined in this file)
  n <- length(x)
  if(alpha < 4./3.) {
    rn.2 <- h + floor((n/2.)^0.499)
  } else {
    rn.2 <- h + floor((n/2.)^(2./alpha - 1.))
  }
  if(alpha > 1) {
    rho.g2 <- g_f.n.x2(x, (rn.2+h))     # G2-ACF, NO mean correction
  } else {
    rho.g2 <- g_f.n.x2(x, (rn.2+h))     # G2-ACF, no mean correction
  }
  if(h > 1.)
    {rho.g2.nn <- c(rev(rho.g2[2.:h]), rho.g2)}
  sapply(1.:h, cdisp.g1, rho.g2.nn, rn.2, h, alpha)
}

cdisp.g1 <- function(l, g_x1, rn.2, h, alpha)      # g_x1 = Gini-1 ACF from g_f.x1
{
  y_g1 <- abs(g_x1[(h+l+1.):(h+l+rn.2)]+g_x1[(1.+(h-l)):(rn.2+(h-l))]-2.*g_x1[(h+1.):(h+rn.2)]*g_x1[h])^(alpha)
  return(y_g1)
}

cdisp.g2 <- function(l, g_x2, rn.2, h, alpha)      # g_x2 = Gini-2 ACF from g_f.x2
{
  y_g2 <- abs(g_x2[(h+l+1.):(h+l+rn.2)]+g_x2[(1.+(h-l)):(rn.2+(h-l))]-2.*g_x2[(h+1.):(h+rn.2)]*g_x2[h])^(alpha)
  return(y_g2)
}

cdisp.g1.n <- function(l, g_x1.n, rn.2, h, alpha)  # g_x1.n = Gini-1 ACF from g_f.n.x1
{
  y_g1.n <- abs(g_x1.n[(h+l+1.):(h+l+rn.2)]+g_x1.n[(1.+(h-l)):(rn.2+(h-l))]-2.*g_x1.n[(h+1.):(h+rn.2)]*g_x1.n[h])^(alpha)
  return(y_g1.n)
}

cdisp.g2.n <- function(l, g_x2.n, rn.2, h, alpha)  # g_x2.n = Gini-2 ACF from g_f.n.x2
{
  y_g2.n <- abs(g_x2.n[(h+l+1.):(h+l+rn.2)]+g_x2.n[(1.+(h-l)):(rn.2+(h-l))]-2.*g_x2.n[(h+1.):(h+rn.2)]*g_x2.n[h])^(alpha)
  return(y_g2.n)
}

Gautcov_EstimF1 = function(N, ar.mat, Lag, sign) {
  # Coded by: Marcel D. Carcea
  # Reference: Contributions to Time Series Modeling under
  #            First Order Moment Assumptions, Appendix B,
  #            PhD thesis, The University of Texas at Dallas, 2014
  #
  # Uses: Concomit, Lag_vect, Gautcov_Positive and Gautcov_Negative
  #       (Lag_vect is not defined in this file).
  # Input:  N      = sample size
  #         ar.mat = [2 * (N-lag)] matrix of AR(p) data
  #         Lag    = size of the lag
  #         sign   = "n" for negative G-ACVF;
  #                  any other value for positive G-ACVF
  # Output: Gautcov.h = vector of G-ACVFs up to lag k.
  #
  # Step 1: compute the lag-k G-ACF using Concomit, Lag_vect and Gautcov_Positive
  Pos_Lag = function(L){
    concomit.pl = Concomit(Lag_vect(L, ar.mat)[1,], Lag_vect(L, ar.mat)[2,])
    sort.pl     = sort(Lag_vect(L, ar.mat)[1,])
    Gautcov.pl  = Gautcov_Positive(N, sort.pl, concomit.pl)
    Gautcov.pl
  }
  # Step 2: compute the lag-k G-ACF using Concomit, Lag_vect and Gautcov_Negative
  Neg_Lag = function(L) {
    concomit.nl = Concomit(Lag_vect(L, ar.mat)[1,], Lag_vect(L, ar.mat)[2,])
    sort.nl     = sort(Lag_vect(L, ar.mat)[1,])
    Gautcov.nl  = Gautcov_Negative(N, sort.nl, concomit.nl)
    Gautcov.nl
  }
  # Step 3:
  run.lag = matrix(1:Lag, byrow=TRUE, nrow=1)
  if (sign=="n") {
    Gautcov.h = apply(run.lag, 2, Neg_Lag)
  } else {
    Gautcov.h = apply(run.lag, 2, Pos_Lag)
  }
  Gautcov.h
}

rhotest <- function(x, alpha, h)   # "classical ACF", T = MEAN CORRECTION
{                                  # rho^{1} and rho^{2}, h = number of lags
  n <- floor(length(x)/2.)         # see Thm. 4.1, subsamples 1:n and (n+1):2n
  if(alpha > 1) {
    rho1 <- acf(x[1:n], h, plot = FALSE)$acf[, , 1]
    rho2 <- acf(x[(n+1):(2*n)], h, plot=FALSE)$acf[, , 1]
    # rhoP <- acf(x[1:length(x)], h, plot = FALSE)$acf[, , 1]
  } else {
    rho1 <- acf.htsid(x[1.:n], h, 1., 1., T)$corr            # heavy-tailed sample ACF, no mean correction
    rho2 <- acf.htsid(x[(n+1.):(2.*n)], h, 1., 1., T)$corr
    # rhoP <- acf(x[1:length(x)], h, plot = FALSE)$acf[, , 1]
  }
  rd <- (n/log(n))^(1./alpha) * abs(rho1-rho2)
  m  <- max(rd)                    # test statistic
  return(m)
  # x = c(rho1)                # CHANGE
  # X = c(rho2)                #   ,,
  # m = rbind(rho1,rho2)       # CHANGE
  # m = matrix(c(x,X),ncol=2)  #   ,,
}
rhotest.g1 <- function(x, alpha, h)   # Gini-1, MEAN CORRECTION
{                                     # call to functions: g_f.x1 + g_f.n.x1
  n  <- floor(length(x)/2.)
  xx <- x[(n+1.):(2.*n)]
  if(alpha > 1) {
    rho1.g1 <- g_f.x1(x[1:n], h+1)
    rho1.g1 <- rho1.g1[1:h+1]
    rho2.g1 <- g_f.x1(xx[1:n], h+1)
    rho2.g1 <- rho2.g1[1:h+1]
  } else {
    rho1.g1 <- g_f.n.x1(x[1:n], h+1)
    rho1.g1 <- rho1.g1[1:h+1]
    rho2.g1 <- g_f.n.x1(xx[1:n], h)
    rho2.g1 <- rho2.g1[1:h+1]
  }
  rd.g1 <- (n/log(n))^(1./alpha) * abs(rho1.g1-rho2.g1)
  rd.g1 <- rd.g1[1:h]
  m.g1  <- max(rd.g1)
  return(m.g1)                        # test statistic  CHANGE
  # x = c(rho1.g1)             # CHANGE
  # X = c(rho2.g1)             #   ,,
  # m = matrix(c(x,X),ncol=2)  #   ,,
}

rhotest.g2 <- function(x, alpha, h)   # Gini-2, MEAN CORRECTION
{                                     # call to functions: g_f.x2 + g_f.n.x2 (not defined in this file)
  n  <- floor(length(x)/2.)
  xx <- x[(n+1.):(2.*n)]
  if(alpha > 1) {
    rho1.g2 <- g_f.x2(x[1:n], h+1)
    rho1.g2 <- rho1.g2[1:h+1]
    rho2.g2 <- g_f.x2(xx[1:n], h+1)
    rho2.g2 <- rho2.g2[1:h+1]
  } else {
    rho1.g2 <- g_f.n.x2(x[1:n], h+1)
    rho1.g2 <- rho1.g2[1:h+1]
    rho2.g2 <- g_f.n.x2(xx[1:n], h)
    rho2.g2 <- rho2.g2[1:h+1]
  }
  rd.g2 <- (n/log(n))^(1./alpha) * abs(rho1.g2-rho2.g2)
  rd.g2 <- rd.g2[1:h]
  m.g2  <- max(rd.g2)
  return(m.g2)                        # test statistic
}

rhotest.g2.n <- function(x, alpha, h)   # Gini-2, NO MEAN CORRECTION
{                                       # call to function: g_f.n.x2 (not defined in this file)
  n  <- floor(length(x)/2.)
  xx <- x[(n+1.):(2.*n)]
  if(alpha > 1) {
    rho1.g1 <- g_f.n.x2(x[1:n], h+1)
    rho1.g1 <- rho1.g1[1:h+1]
    rho2.g1 <- g_f.n.x2(xx[1:n], h+1)
    rho2.g1 <- rho2.g1[1:h+1]
    rho_g2  <- g_f.n.x2(x[1:length(x)], h+1)
    rho_g2  <- rho_g2[1:h+1]
  } else {
    rho1.g1 <- g_f.n.x2(x[1:n], h+1)
    rho1.g1 <- rho1.g1[1:h+1]
    rho2.g1 <- g_f.n.x2(xx[1:n], h)
    rho2.g1 <- rho2.g1[1:h+1]
    rho_g2  <- g_f.n.x2(x[1:length(x)], h+1)
    rho_g2  <- rho_g2[1:h+1]
  }
  rd.g1  <- (n/log(n))^(1./alpha) * abs(rho1.g1-rho2.g1)
  rd.g1  <- rd.g1[1:h]
  m.g2.n <- max(rd.g1)
  return(m.g2.n)                        # test statistic
  # m.g2.n <- rho_g2
}

## require(CircStats)   # used for "rstable": rvs from the stable family of distributions;
##                      # alternatively, use package 'stabledist': require(stabledist)
## library(actuar)      # for Pareto3 rvs

stabmat <- function(alpha, N, rn)   # N = nrep, call to: "rstable"
# u = S_{j} and S_{j}^{'} rvs, i.i.d. stable with index "alpha"
# v = S_{0} and S_{0}^{'} rvs, independent stable with index "alpha/2"
# Output: (4.1) in Resnick-Van den Berg (2000)
{
  u <- (matrix(rstable(N*2.*rn, alpha, 1), nrow=N))        # +tan((pi*alpha)/2.))*
  # u <- (matrix(rpareto3(N*2.*rn, 0, alpha, 1), nrow=N))  #  (cos((pi*alpha)/2.)*gamma(1.-alpha))^(1./alpha)
  v <- (matrix(rstable(N*2., alpha/2., 1), ncol=2.))       # +tan((pi*alpha)/4.))*
                                                           #  (cos((pi*alpha)/4.)*gamma(1.-alpha/2.))^(2./alpha)
  # v <- (matrix(rpareto3(N*2.*rn, 0, alpha/2, 1), nrow=2.))
  u[,1.:rn]/v[,1.] - u[,(rn+1.):(2.*rn)]/v[,2.]
}
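## ----------------------------------------------------------------------
## Usage sketch (illustration only, assumed example values): draw the
## matrix of stable-ratio differences S_j/S_0 - S_j'/S_0' underlying
## (4.1) in Resnick-Van den Berg (2000) with stabmat().  Requires the
## CircStats package (noted above) for rstable().
##
##   require(CircStats)
##   set.seed(1)
##   Y <- stabmat(alpha = 1.5, N = 1000, rn = 30)   # 1000 x 30 matrix
##   dim(Y)
## ----------------------------------------------------------------------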