lec03


1

2
# Previous inappropriate version
tol = 1e-7; grad = 1e10; lambda = 0.2; gamma = 0.9
x = 10; x.hist = x; v = 0
while (abs(grad) > tol){
  grad = 2*x + 2                 # gradient of f(x) = x^2 + 2x
  v = gamma*v - lambda*grad      # momentum update
  x = x + v
  x.hist = c(x.hist, x)
  print(c(x, grad))
}
x.temp = seq(-10, 10, 0.1)
plot(x.temp, x.temp^2 + 2*x.temp, type='l', lwd=2)
lines(x.hist, x.hist^2 + 2*x.hist, type='o', pch=1, col='red', cex=1)

3
# New appropriate version
tol = 1e-7; grad = 1e10; lambda = 0.2; gamma = 0.9
x = 10; x.hist = x; v = 0
repeat {
  grad = 2*x + 2
  if (abs(grad) <= tol){ break }
  v = gamma*v - lambda*grad
  x = x + v
  x.hist = c(x.hist, x)
  print(c(x, grad))
}
x.temp = seq(-10, 10, 0.1)
plot(x.temp, x.temp^2 + 2*x.temp, type='l', lwd=2)
lines(x.hist, x.hist^2 + 2*x.hist, type='o', pch=1, col='red', cex=1)

4

5
naive.stochoptim <- function(obj.func, x, n.iter, width){
  opt.value <- do.call(obj.func, list(x))
  opt.hist = matrix(0, nrow = n.iter, ncol = 5)
  opt.hist[1,] = c(x, x, opt.value, opt.value, 1)
  for (i.iter in 2:n.iter){
    accept = 0
    temp.x <- x + rnorm(1, mean = 0, sd = width)      # random candidate
    temp.value <- do.call(obj.func, list(temp.x))
    if (temp.value < opt.value){                      # keep only improvements
      x = temp.x
      opt.value = temp.value
      accept = 1
    }
    opt.hist[i.iter, ] = c(x, temp.x, opt.value, temp.value, accept)
  }
  return(data.frame(opt.hist))
}

6
set.seed(50)
n.iter = 500
fun01 <- function(x){ x^2 + 2*x }
res <- naive.stochoptim(fun01, 3, n.iter, 1)
> head(res)
  X1 X2 X3 X4 X5

7

8
x_new = x + N(0, width)
Δf = f(x_new) - f(x)
T ← ηT
T: temperature
c: problem-specific constant
P(x_new, T): probability of accepting the hypothetical x_new
η (eta): annealing constant (< 1)

9
Δf = f(x_new) - f(x)
P(x_new, T) = 1 / (1 + exp(Δf / (cT)))
x_new replaces the current x with probability P(x_new, T).

10
SimAneal01 <- function(func, initxy, maxItr = 1000, C = 1, eta = 0.99, width = 10){
  x = initxy
  opt.value = do.call(func, list(x))
  n.var = length(x)
  opt.hist = matrix(0, nrow = maxItr, ncol = 5)
  opt.hist[1,] = c(x, x, opt.value, opt.value, 0)
  for (i_loop in 2:maxItr){
    accept = 0
    temp.x = x + rnorm(n.var, mean = 0, sd = width)
    temp.value = do.call(func, list(temp.x))
    delta = temp.value - opt.value
    prob = 1/(1 + exp(delta/(C*width)))     # acceptance probability; width plays the role of T
    if (runif(1) < prob){
      x = temp.x; opt.value = temp.value; accept = 1
    }
    opt.hist[i_loop,] = c(x, temp.x, opt.value, temp.value, accept)
    width = width*eta                        # annealing
  }
  return(data.frame(opt.hist))
}

11
set.seed(50)
n.iter = 500
fun01 <- function(x){ x^2 + 2*x }
res <- SimAneal01(fun01, 3, n.iter, 1, 0.985, 1)
> head(res)
  X1 X2 X3 X4 X5

12

13

14
model IV1 IV2 IV3 IV4 IV5 IV6 IV7 IV8 IV9 IV10

15
set.seed(19)
nVar = 10
nData = 50
X = matrix(rnorm(nVar*nData, mean=10, sd=2), ncol=nVar)
y = X %*% c(-2,0,2,0,2,0,-3,0,-2,0) + rnorm(nData, mean=0, sd=4)
# memo: %*% is matrix multiplication

16
> summary(lm(y ~ X))
Coefficients:
             Estimate Std. Error t value Pr(>|t|)
(numeric values omitted; X1, X3, X5, X7, and X9 are significant at '***', X2, X4, X6, X8, and X10 are not)
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residual standard error: ... on 39 degrees of freedom
Multiple R-squared: ..., Adjusted R-squared: ...
F-statistic: ... on 10 and 39 DF, p-value: < 2.2e-16

17
> summary(lm(y ~ X[, seq(1,9,2)]))
Coefficients:
             Estimate Std. Error t value Pr(>|t|)
(numeric values omitted; all five selected predictors are significant at '***')
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residual standard error: 5.74 on 44 degrees of freedom
Multiple R-squared: ..., Adjusted R-squared: 0.95
F-statistic: ... on 5 and 44 DF, p-value: < 2.2e-16

18
(figure: the regression coefficient vector β)

19
(figure: variable selection expressed as fixing some elements of β to 0, i.e., choosing which columns of X enter the model)

20
GA_recomb <- function(G){
  nPop = nrow(G); nVar = ncol(G)
  child = G                                   # parent 1
  G.permuted = G[sample(1:nPop),]             # parent 2
  # crossover idx
  recomb.idx = which(matrix(sample(0:1, nPop*nVar, replace=TRUE), nrow=nPop) == 1)
  child[recomb.idx] = G.permuted[recomb.idx]
  return(child)
}

21
GA_mutate = function(child, p){
  n.pop = nrow(child)
  n.var = ncol(child)
  mut.mat = matrix((runif(n.pop*n.var) < p), nrow = n.pop)
  child = abs(child - mut.mat)   # abs(0-1) -> 1; abs(1-1) -> 0, i.e., 0 -> 1 and 1 -> 0
  return(child)
}

22
GA_survive <- function(G, child, fitG, fitC){
  nPop = nrow(G)
  fitT = c(fitG, fitC)
  fitMax = sort(fitT, decreasing = TRUE, index.return = TRUE)   # maximize adjusted R^2
  tempX = rbind(G, child)
  G = tempX[fitMax$ix[1:nPop],]
  return(G)
}

23
See courselog for MAIN function
# RESULT
> res <- GA(G, 100, 0.1, X, y)
> head(res$G)
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
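
The MAIN function itself is in the course log; the following is only a minimal sketch of how the three operators above could be wired together, assuming a 0/1 population matrix G and an adjusted-R^2 fitness function (the initialization of G, the fitness function, and the name GA are assumptions, not the course's actual code).

# Minimal sketch (assumed, not the course's MAIN function)
# G is an nPop x nVar 0/1 matrix, e.g. G = matrix(sample(0:1, 20*10, replace=TRUE), nrow=20)
GA <- function(G, n.gen, p.mut, X, y){
  fitness <- function(idx){                 # adjusted R^2 of the model coded by a 0/1 row
    if (sum(idx) == 0) return(-Inf)
    summary(lm(y ~ X[, idx == 1]))$adj.r.squared
  }
  fitG = apply(G, 1, fitness)
  for (i.gen in 1:n.gen){
    child = GA_mutate(GA_recomb(G), p.mut)  # recombination then mutation
    fitC = apply(child, 1, fitness)
    G = GA_survive(G, child, fitG, fitC)    # keep the nPop fittest models
    fitG = apply(G, 1, fitness)
  }
  return(list(G = G, fit = fitG))
}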

24
n_iv = Σ_{i=1}^{n_interaction} C(n_var, i)   (number of terms: main effects plus interaction terms)
model  x1  x2  x3  x1*x2  x1*x3  x2*x3  x1*x2*x3

25
With 10 variables and interactions up to order 5:
n_iv = Σ_{i=1}^{5} C(10, i) = 637
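
As a quick check, the same count can be reproduced directly in R with choose():

> sum(choose(10, 1:5))
[1] 637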

26
combn(nVar, nInt)
> combn(3,2)
     [,1] [,2] [,3]
[1,]    1    1    2
[2,]    2    3    3
> combn(3,3)
     [,1]
[1,]    1
[2,]    2
[3,]    3

27
C = combn(3,2)
> paste("x", C, sep = "")
[1] "x1" "x2" "x1" "x3" "x2" "x3"
> temp.label = toString(paste("x", C[, i.comb], sep = ""))   # e.g. i.comb = 1
> temp.label
[1] "x1, x2"
> gsub(",", "*", temp.label)
[1] "x1* x2"
var.labels = c()
n.comb = ncol(C)
for (i.comb in 1:n.comb){
  temp.label = toString(paste("x", C[, i.comb], sep = ""))
  var.labels = c(var.labels, gsub(",", "*", temp.label))
}
> var.labels
[1] "x1* x2" "x1* x3" "x2* x3"

28
mk.labels <- function(C){
  var.labels = c()
  n.comb = ncol(C)
  for (i.comb in 1:n.comb){
    temp.label = toString(paste("x", C[, i.comb], sep = ""))
    var.labels = c(var.labels, gsub(",", "*", temp.label))
  }
  return(var.labels)
}

29
n.var = 3
max.interaction = 3
# single-variable terms
var.labels = paste("x", 1:n.var, sep = "")
# interaction terms
for (i.interaction in 2:max.interaction){
  combination = combn(n.var, i.interaction)
  var.labels = c(var.labels, mk.labels(combination))
}
> var.labels
[1] "x1" "x2" "x3" "x1* x2" "x1* x3" "x2* x3" "x1* x2* x3"

30
# creating formula for Linear Model
model.def = paste("y ~", gsub(",", "+", toString(var.labels[idx == 1])))
# example
> model.def = paste("y ~", gsub(",", "+", toString(var.labels[c(1,1,1,1,1,0,0) == 1])))
> model.def
[1] "y ~ x1+ x2+ x3+ x1* x2+ x1* x3"
# running Linear Model
model.lm = lm(model.def, data)

31
set.seed(20)
nVar = 3
nData = 50
X = matrix(rnorm(nVar*nData, mean=10, sd=5), ncol=nVar)
y = X %*% c(1,1,-2) + rnorm(nData, mean=0, sd=3)
dat = data.frame(y=y, x1=X[,1], x2=X[,2], x3=X[,3])
# example
model.def1 = paste("y ~", gsub(",", "+", toString(var.labels[c(1,1,1,0,0,0,0) == 1])))
model.lm = lm(model.def1, dat)

32
# example
model.def1 = paste("y ~", gsub(",", "+", toString(var.labels[c(1,1,1,0,0,0,0) == 1])))
model.lm = lm(model.def1, dat)
> summary(model.lm)
Coefficients:
             Estimate Std. Error t value Pr(>|t|)
(numeric values omitted; x1, x2, and x3 are all significant at '***')
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
Residual standard error: ... on 46 degrees of freedom
Multiple R-squared: ..., Adjusted R-squared: ...
F-statistic: ... on 3 and 46 DF, p-value: < 2.2e-16

33
model  IV1  IV2  IV3  IV4  IV5  IV6  IV7  IV8  IV9  IV10
1      c11  c12  c13  c14  c15  c16  c17  c18  c19  c110
2      c21  c22  c23  c24  c25  c26  c27  c28  c29  c210
...
Npop

34
set.seed(20); nData = 100
X = matrix(rnorm(9*nData, mean=10, sd=2), ncol=9); X = cbind(rep(1,nData), X)
y = X %*% c(10,2,5,-3,-5,0,0,0,0,0) + rnorm(nData, mean=0, sd=2)
> summary(lm(y ~ X[,2:10]))
Coefficients:
             Estimate Std. Error t value Pr(>|t|)
(numeric values omitted; the intercept is significant at '**', the first four predictors at '***', and the remaining five predictors are not significant)

35

36
θ_i^child = θ_i^parent1   if UNI_i < 0.5
            θ_i^parent2   otherwise
σ_i^child = (σ_i^parent1 + σ_i^parent2) / 2

37
σ_i ← σ_i * exp(τ * N(0,1))
θ_i ← θ_i + N(0, σ_i)

38
θ_i^child = θ_i^parent1   if UNI_i < 0.5
            θ_i^parent2   otherwise
σ_i^child = (σ_i^parent1 + σ_i^parent2) / 2

ES_recomb <- function(G){
  nParent = nrow(G$x); nVar = ncol(G$x)
  nChild = nParent                       # one child per parent slot
  child = G
  for (i_child in 1:nChild){
    parentID = sample(1:nParent, 2)
    coID = sample(c(0,1), nVar, replace=TRUE)
    child$x[i_child,] = G$x[parentID[1],]
    child$x[i_child, which(coID==1)] = G$x[parentID[2], which(coID==1)]
    child$sigma[i_child,] = 0.5*(G$sigma[parentID[1],] + G$sigma[parentID[2],])
  }
  return(child)
}

39
σ_i ← σ_i * exp(τ * N(0,1))
θ_i ← θ_i + N(0, σ_i)

ES_mutate <- function(child, tau){
  nChild = nrow(child$x); nVar = ncol(child$x)
  child$sigma <- child$sigma * exp(matrix(rnorm(nChild*nVar)*tau, nrow=nChild))
  child$x = child$x + child$sigma * matrix(rnorm(nChild*nVar), nrow=nChild, ncol=nVar)
  return(child)
}

40
ES_survive <- function(G, child, fitG, fitC){
  nParent = nrow(G$x)
  fitT = c(fitG, fitC)
  fitMin = sort(fitT, index.return = TRUE)   # minimize the objective
  tempX = rbind(G$x, child$x)
  tempS = rbind(G$sigma, child$sigma)
  G$x = tempX[fitMin$ix[1:nParent],]
  G$sigma = tempS[fitMin$ix[1:nParent],]
  return(G)
}

41

42
set.seed(20); nData = 100
X = matrix(rnorm(9*nData, mean=10, sd=2), ncol=9); X = cbind(rep(1,nData), X)
y = X %*% c(10,2,5,-3,-5,0,0,0,0,0) + rnorm(nData, mean=0, sd=2)
fun04 <- function(b, X, y){
  yhat <- X %*% b
  return(sum((y - yhat)^2))   # residual sum of squares
}
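
The main ES routine itself is not reproduced on these slides; the following is only a minimal sketch of how the three ES operators could be combined, consistent with the call on the next slide (the initialization of the population G, with its $x and $sigma components, and the name ES are assumptions here).

# Minimal sketch (assumed): G is a list with G$x (parameters) and G$sigma (step sizes),
# e.g. G = list(x = matrix(rnorm(10*10), nrow=10), sigma = matrix(1, nrow=10, ncol=10))
ES <- function(G, func, maxGen, tau, X, y){
  fitG = apply(G$x, 1, func, X, y)
  for (i.gen in 1:maxGen){
    child = ES_mutate(ES_recomb(G), tau)   # recombination then self-adaptive mutation
    fitC = apply(child$x, 1, func, X, y)
    G = ES_survive(G, child, fitG, fitC)   # keep the nParent best solutions
    fitG = apply(G$x, 1, func, X, y)
  }
  return(list(G = G, fit = fitG))
}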

43
> res = ES(G, fun04, 10000, 2, X, y)
> head(round(res$G$x, 3))
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
> summary(lm(y ~ X[,2:10]))
Coefficients:
             Estimate Std. Error t value Pr(>|t|)
(numeric values omitted; the intercept is significant at '**', the first four predictors at '***', and the remaining five predictors are not significant)

44

45
v_i(t) = v_i(t-1) + w_p * U(0,1) * (pbest_i - θ_i(t-1)) + w_g * U(0,1) * (gbest - θ_i(t-1))
θ_i(t) = θ_i(t-1) + v_i(t)

46

47
# initialization
swarm.hist = array(0, c(nrow(G), ncol(G), maxIter))
swarm.hist[,,1] = G
p.b.hist = apply(G, 1, func)                 # personal best values
global.best.v = min(p.b.hist)
p.best = G                                   # personal best positions
g.best = matrix(G[which.min(p.b.hist),], nrow=nrow(G), ncol=ncol(G), byrow=TRUE)
v = matrix(0, nrow = nrow(G), ncol = ncol(G))

48
# main loop
for (i.iter in 2:maxIter){
  v = v + wp*runif(1)*(p.best - G) + wg*runif(1)*(g.best - G)
  G = G + v
  fitG = apply(G, 1, func)
  if (min(fitG) < global.best.v){
    g.best = matrix(G[which.min(fitG),], nrow=nrow(G), ncol=ncol(G), byrow=TRUE)
    global.best.v = min(fitG)
  }
  idx = which(fitG < p.b.hist)
  p.best[idx,] = G[idx,]
  p.b.hist[idx] = fitG[idx]
  swarm.hist[,,i.iter] = G
}
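
The two fragments above are not wrapped in a function on the slides; a minimal sketch of a wrapper that combines them is shown below (the name PSO, its argument list, and the return value are assumptions).

# Minimal sketch (assumed wrapper around the initialization and main loop above)
PSO <- function(G, func, maxIter = 100, wp = 1.0, wg = 1.0){
  swarm.hist = array(0, c(nrow(G), ncol(G), maxIter))
  swarm.hist[,,1] = G
  p.b.hist = apply(G, 1, func)
  global.best.v = min(p.b.hist)
  p.best = G
  g.best = matrix(G[which.min(p.b.hist),], nrow=nrow(G), ncol=ncol(G), byrow=TRUE)
  v = matrix(0, nrow=nrow(G), ncol=ncol(G))
  for (i.iter in 2:maxIter){
    v = v + wp*runif(1)*(p.best - G) + wg*runif(1)*(g.best - G)
    G = G + v
    fitG = apply(G, 1, func)
    if (min(fitG) < global.best.v){
      g.best = matrix(G[which.min(fitG),], nrow=nrow(G), ncol=ncol(G), byrow=TRUE)
      global.best.v = min(fitG)
    }
    idx = which(fitG < p.b.hist)
    p.best[idx,] = G[idx,]
    p.b.hist[idx] = fitG[idx]
    swarm.hist[,,i.iter] = G
  }
  return(list(best = g.best[1,], best.value = global.best.v, hist = swarm.hist))
}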

49
z = x^4 - 16x^2 + 5x + y^4 - 16y^2 + 5y
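
A direct R version of this two-variable test function, written to take a single parameter vector as the PSO code above expects, might look like this (the function name and the swarm initialization are assumptions):

# assumed helper: the test function above, taking one (x, y) vector
fun.xy <- function(p){
  x = p[1]; y = p[2]
  x^4 - 16*x^2 + 5*x + y^4 - 16*y^2 + 5*y
}
# e.g. a swarm of 20 particles in 2 dimensions:
# G = matrix(runif(20*2, -5, 5), nrow = 20); func = fun.xy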

50

51
y = 0  if w1*x1 + w2*x2 ≤ θ
    1  if w1*x1 + w2*x2 > θ

52
y = 0  if w1*x1 + w2*x2 ≤ θ
    1  if w1*x1 + w2*x2 > θ

AND gate:
y = 0  if 0.5*x1 + 0.5*x2 ≤ 0.7
    1  if 0.5*x1 + 0.5*x2 > 0.7

53
NAND gate:
y = 0  if -0.5*x1 - 0.5*x2 ≤ -0.7
    1  if -0.5*x1 - 0.5*x2 > -0.7

OR gate:
y = 0  if 0.5*x1 + 0.5*x2 ≤ 0.3
    1  if 0.5*x1 + 0.5*x2 > 0.3

54
AND.gate <- function(x1, x2){
  w1 = 0.5
  w2 = 0.5
  theta = 0.7
  y.temp = w1*x1 + w2*x2
  if (y.temp <= theta){
    y = 0
  } else {
    y = 1
  }
  return(y)
}

55
AND.gate <- function(x1, x2){
  w1 = 0.5; w2 = 0.5; theta = 0.7
  return(as.numeric(w1*x1 + w2*x2 > theta))
}

56
NAND.gate <- function(x1, x2){
  w1 = -0.5; w2 = -0.5; theta = -0.7
  return(as.numeric(w1*x1 + w2*x2 > theta))
}
OR.gate <- function(x1, x2){
  w1 = 0.5; w2 = 0.5; theta = 0.3
  return(as.numeric(w1*x1 + w2*x2 > theta))
}

57
y = 0  if w1*x1 + w2*x2 ≤ θ
    1  if w1*x1 + w2*x2 > θ

With bias b = -θ:
y = 0  if b + w1*x1 + w2*x2 ≤ 0
    1  if b + w1*x1 + w2*x2 > 0

58
AND.gate <- function(x1, x2){
  w1 = 0.5
  w2 = 0.5
  b = -0.7
  y.temp = w1*x1 + w2*x2 + b
  if (y.temp <= 0){
    y = 0
  } else {
    y = 1
  }
  return(y)
}

59
AND, NAND, and OR
AND.gate <- function(x1, x2){
  w1 = 0.5; w2 = 0.5; b = -0.7
  return(as.numeric(w1*x1 + w2*x2 + b > 0))
}
NAND.gate <- function(x1, x2){
  w1 = -0.5; w2 = -0.5; b = 0.7
  return(as.numeric(w1*x1 + w2*x2 + b > 0))
}
OR.gate <- function(x1, x2){
  w1 = 0.5; w2 = 0.5; b = -0.3
  return(as.numeric(w1*x1 + w2*x2 + b > 0))
}

60

61

62
XOR.gate <- function(x1, x2){
  gate1 <- NAND.gate(x1, x2)
  gate2 <- OR.gate(x1, x2)
  y <- AND.gate(gate1, gate2)
  return(y)
}
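
As a quick usage check (not on the slide), the resulting truth table can be printed like this:

# usage example: print the XOR truth table
for (x1 in 0:1){
  for (x2 in 0:1){
    print(c(x1, x2, XOR.gate(x1, x2)))
  }
}
# expected rows: 0 0 0 / 0 1 1 / 1 0 1 / 1 1 0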

63
y = 0  if b + w1*x1 + w2*x2 ≤ 0
    1  if b + w1*x1 + w2*x2 > 0

Equivalently, y = h(b + w1*x1 + w2*x2), where
h(x) = 0  (x ≤ 0)
       1  (x > 0)

64
a = b + w1*x1 + w2*x2
y = h(a)

65
h(x) = 0  (x ≤ 0)
       1  (x > 0)

step.func <- function(x){
  return(as.numeric(x > 0))
}

66
h(x) = 0  (x ≤ 0)
       1  (x > 0)

step.func <- function(x){
  return(as.numeric(x > 0))
}
x = seq(-5, 5, 0.1)
y = step.func(x)
plot(x, y, ylab = 'y', xlab = 'a', type = "l", lwd = 2)

67
h(x) = 1 / (1 + exp(-x))

sigmoid.func <- function(x){
  return(1/(1+exp(-x)))
}

68
h(x) = 1 / (1 + exp(-x))

sigmoid.func <- function(x){
  return(1/(1+exp(-x)))
}
y = sigmoid.func(x)
plot(x, y, ylab = 'y', xlab = 'a', type = "l", lwd = 2)

69
y.step = step.func(x)
y.sigm = sigmoid.func(x)
plot(x, y.step, ylab = 'y', xlab = 'a', type = "l", lwd = 2)
lines(x, y.sigm, lwd = 2, lty = 2)

70
h(x) = 0  (x ≤ 0)
       x  (x > 0)

relu.func <- function(x){
  return(pmax(0, x))
}
y.relu = relu.func(x)
plot(x, y.relu, ylab = 'y', xlab = 'a', type = "l", lwd = 2)

71
( a11 a12 ) ( b11 b12 )   ( a11*b11 + a12*b21   a11*b12 + a12*b22 )
( a21 a22 ) ( b21 b22 ) = ( a21*b11 + a22*b21   a21*b12 + a22*b22 )

A = matrix(1:4, nrow = 2, byrow = T)
B = matrix(5:8, nrow = 2, byrow = T)
> A %*% B
     [,1] [,2]
[1,]   19   22
[2,]   43   50

72
A = matrix(1:6, nrow = 3, byrow = T)
B = matrix(7:8, nrow = 2, byrow = T)
> A %*% B
     [,1]
[1,]   23
[2,]   53
[3,]   83

73
(figure: a two-layer diagram labeling the bias b, weights w, weighted inputs a, and activations z; in the weight notation, the superscript (green) is the layer index, the first subscript is the neuron in the next layer, and the second subscript is the neuron in the previous layer)

74
X = (x1  x2)
A^(1) = (a1  a2  a3),  B^(1) = (b1  b2  b3)
W^(1) = ( w11  w21  w31 )
        ( w12  w22  w32 )
A^(1) = X W^(1) + B^(1)
Z^(1) = (z1  z2  z3) = h(A^(1))

x = c(1, 0.5)
W1 = matrix((1:6)*0.1, nrow = 2)
B1 = (1:3)*0.1
A1 = x %*% W1 + B1
> A1
     [,1] [,2] [,3]
[1,]  0.3  0.7  1.1
Z1 = sigmoid.func(A1)
> Z1
          [,1]      [,2]      [,3]
[1,] 0.5744425 0.6681878 0.7502601

75
(figure: the same notation applied to the next layer, with weighted inputs a and activations z for each neuron; superscript (green) = layer, first subscript = neuron in the next layer, second subscript = neuron in the previous layer)

76
x = c(1, 0.5)
W1 = matrix((1:6)*0.1, nrow = 2)
B1 = (1:3)*0.1
A1 = x %*% W1 + B1
Z1 = sigmoid.func(A1)
W2 = matrix((1:6)*0.1, nrow = 3)
B2 = c(0.1, 0.2)
A2 = Z1 %*% W2 + B2
Z2 = sigmoid.func(A2)
W3 = matrix((1:4)*0.1, nrow = 2)
B3 = c(0.1, 0.2)
A3 = Z2 %*% W3 + B3
Z3 = A3
> Z3
          [,1]      [,2]
[1,] 0.3168271 0.6962791

X = (x1  x2),  A^(1) = (a1  a2  a3),  B^(1) = (b1  b2  b3)
W^(1) = ( w11  w21  w31 )
        ( w12  w22  w32 )
A^(1) = X W^(1) + B^(1),   Z^(1) = h(A^(1))
A^(2) = Z^(1) W^(2) + B^(2),   Z^(2) = h(A^(2))
A^(3) = Z^(2) W^(3) + B^(3),   Z^(3) = h(A^(3))

77
# function to initialize 3L network
init.3l.network <- function(){
  W1 = matrix((1:6)*0.1, nrow = 2)
  B1 = (1:3)*0.1
  W2 = matrix((1:6)*0.1, nrow = 3)
  B2 = c(0.1, 0.2)
  W3 = matrix((1:4)*0.1, nrow = 2)
  B3 = c(0.1, 0.2)
  return(list(W1 = W1, B1 = B1, W2 = W2, B2 = B2, W3 = W3, B3 = B3))
}
# feedforward process
forward.3l <- function(network, x){
  A1 = x %*% network$W1 + network$B1
  Z1 = sigmoid.func(A1)
  A2 = Z1 %*% network$W2 + network$B2
  Z2 = sigmoid.func(A2)
  A3 = Z2 %*% network$W3 + network$B3
  Z3 = sigmoid.func(A3)
  return(Z3)
}
network <- init.3l.network()
y = forward.3l(network, c(1, 0.5))
> y

78
Output-layer activation σ( ):
identity:  y_k = a_k
softmax:   y_k = exp(a_k) / Σ_i exp(a_i)
softmax (numerically stable form):  y_k = exp(a_k - C) / Σ_i exp(a_i - C),  C = max(a)

79
y_k = exp(a_k - C) / Σ_i exp(a_i - C)

> exp(a)/sum(exp(a))        # naive version overflows for large a
[1] NaN NaN NaN

softmax.func <- function(x){
  max.x = max(x)
  return(exp(x - max.x)/sum(exp(x - max.x)))
}
> softmax.func(a)
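
The vector a used on the slide was not preserved; a hypothetical input that produces the same overflow behaviour might be:

# hypothetical example input (not the slide's actual a)
a = c(1010, 1000, 990)
exp(a)/sum(exp(a))    # NaN NaN NaN, because exp(1010) overflows to Inf
softmax.func(a)       # well-defined probabilities that sum to 1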

80
train <- read.csv('...', header = TRUE)     # URL not preserved
train <- data.matrix(train)
train.x <- train[,-1]
train.y <- train[,1]
train.x <- t(train.x/255)
download.file("...", "trnetwork.rdata")     # URL not preserved
load("trnetwork.rdata")
network = trnetwork

81
n.train = ncol(train.x)
correct.cl = 0
conf.matrix = matrix(0, 10, 10)
for (i.loop in 1:n.train){
  y = forward.3l(network, train.x[,i.loop])
  max.y = max.col(y)
  conf.matrix[max.y, (train.y[i.loop]+1)] = conf.matrix[max.y, (train.y[i.loop]+1)] + 1
}
accuracy = sum(diag(conf.matrix))/n.train

82

83
batch_size = 200
conf.matrix = matrix(0, 10, 10)
for (i.batch in seq(1, n.train, batch_size)){
  y = forward.3l(network, train.x[, (i.batch:(i.batch+batch_size-1))])
  pred = max.col(y)
  conf.matrix = conf.matrix + table(pred, (train.y[i.batch:(i.batch+batch_size-1)]+1))
}
accuracy = sum(diag(conf.matrix))/n.train

84
Batch
> system.time({
+   for (i.batch in seq(1, n.train, batch_size)){
+     y = forward.3l(network, train.x[, (i.batch:(i.batch+batch_size-1))])
+     pred = max.col(y)
+     conf.matrix = conf.matrix + table(pred, (train.y[i.batch:(i.batch+batch_size-1)]+1))
+   }
+ })
   user  system elapsed

Online
> system.time({
+   for (i.loop in 1:n.train){
+     y = forward.3l(network, train.x[,i.loop])
+     max.y = max.col(y)
+     conf.matrix[max.y, (train.y[i.loop]+1)] = conf.matrix[max.y, (train.y[i.loop]+1)] + 1
+   }
+ })
   user  system elapsed
