# (c) Jaroslaw Arabas

############## SELECTION ############################

# randomly select `number' indices out of mu OK
select.rand<-function(mu,number=mu) {
  return(sample(1:mu,number,replace=TRUE))
}

# select the index of the best point OK
select.best<-function(eval) {
  return(which.max(eval))
}

# fitness proportionate selection
select.prop<-function(mu,eval,number) {
  prob=eval+0.001/length(eval)
  prob=prob/sum(prob)
  return(sample(1:mu,number,prob=prob,replace=TRUE))
}

# fitness proportionate selection with the fitness shifted by its minimum
# and corrected by a user-supplied correction function
select.prop.mod<-function(mu,eval,number,correction,pop,lower.limit,upper.limit,cor.weight) {
  prob=eval-min(eval)+0.001/length(eval)
  prob=correction(pop,prob,lower.limit,upper.limit,eval,cor.weight)
  prob=prob/sum(prob)
  return(sample(1:mu,number,prob=prob,replace=TRUE))
}

############# MUTATION ##############################

# single difference vectors built from pairs of distinct archive points OK
diff.vector<-function(archive,mu,arch.size=mu) {
  m1=sample(1:arch.size,mu,replace=TRUE)
  m2=(m1+sample(1:(arch.size-1),mu,replace=TRUE)-1)%%arch.size+1
  return(archive[m2,]-archive[m1,])
}

############ CROSSOVER #################################

# binary crossover; pex is the exchange probability, i.e. the probability
# that a gene is taken from parent2 OK
cross.bin<-function(parents1,parents2,pex=0.5) {
  src=ceiling(runif(parents1)-pex)
  return(src*parents1+(1-src)*parents2)
}

############ REPLACEMENT #############################

# select the index (1 or 2) of the better point in a pairwise comparison
repl.best<-function(eval1,eval2) {
  return(2-(eval1>eval2))
}

############################ initialization modes

# uniform distribution within the boundary constraints OK
init.unif<-function(lower.limit,upper.limit,mu) {
  ndim=length(lower.limit)
  pop=matrix(runif(mu*ndim),mu,ndim)
  ll=matrix(lower.limit,mu,ndim,byrow=TRUE)
  dl=matrix(upper.limit-lower.limit,mu,ndim,byrow=TRUE)
  return(ll+pop*dl)
}

############### REPAIR STRATEGIES

# replace the whole population by a fresh uniform sample
repair.reinit<-function(pop,lower.limit,upper.limit,original,eval.pop) {
  return(init.unif(lower.limit,upper.limit,dim(pop)[1]))
}

# average of the projection repair and the F-narrowing repair
repair.project.narrow<-function(pop,lower.limit,upper.limit,original,eval.pop) {
  return((repair.project(pop,lower.limit,upper.limit,original)
          +repair.narrow.F(pop,lower.limit,upper.limit,original))/2)
}

# projection on the constraints OK
repair.project<-function(pop,lower.limit,upper.limit,original,eval.pop) {
  mu=dim(pop)[1]; ndim=dim(pop)[2]
  EPS=0.01/ndim
  ll=matrix(lower.limit,mu,ndim,byrow=TRUE)
  dl=matrix(upper.limit-lower.limit,mu,ndim,byrow=TRUE)
  pop=(pop-ll)/dl
  popl=(pop>0)*pop+(pop<0)*EPS*dl*runif(pop)
  popu=1-popl
  popu=(popu>0)*popu+(popu<0)*EPS*dl*runif(pop)
  return(ll+(1-popu)*dl)
}

# reflection from the constraints OK
repair.reflect<-function(pop,lower.limit,upper.limit,original,eval.pop) {
  mu=dim(pop)[1]; ndim=dim(pop)[2]
  EPS=0.01/ndim
  ll=matrix(lower.limit,mu,ndim,byrow=TRUE)
  dl=matrix(upper.limit-lower.limit,mu,ndim,byrow=TRUE)
  pop=(pop-ll)/dl
  popl=(pop>0)*pop+((pop<0)*(-pop))%%1
  popu=1-popl
  popu=(popu>0)*popu+((popu<0)*(-popu))%%1
  return(ll+(1-popu)*dl)
}

# narrow the mutation to find a point near the constraint boundary on the line
# between the mutant and the original point OK
repair.narrow.F<-function(pop,lower.limit,upper.limit,original,eval.pop) {
  # SECTION=1/1.6 # (golden ratio)
  SECTION=1/2
  for (i in 1:dim(pop)[1]) {
    PATIENCE=10
    if (mean(pop[i,]<lower.limit)>0 || mean(pop[i,]>upper.limit)>0) {
      while (PATIENCE>0) {
        # print(pop[i,])
        new=SECTION*original[i,]+(1-SECTION)*pop[i,]
        if (mean(new<lower.limit)>0 || mean(new>upper.limit)>0)
          pop[i,]=new
        else
          original[i,]=new
        PATIENCE=PATIENCE-1
      }
      pop[i,]=original[i,]
    }
  }
  return(pop)
}
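# A minimal usage sketch added for illustration (the 'ex.*' names are arbitrary
# and not part of the original code): sample a feasible population, push it out
# of the box and pull it back with the projection repair.
ex.pop=init.unif(c(-1,-1),c(1,1),5)              # 5 feasible points in [-1,1]^2
ex.mut=ex.pop+1.5                                # shift them (partly) outside the box
ex.rep=repair.project(ex.mut,c(-1,-1),c(1,1),ex.pop)
print(range(ex.rep))                             # the repaired points lie inside the box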
# correction of the fitness around the middle of the feasible box
correction.general<-function(pop,probs,lower,upper,eval.pop,cor.weight) {
  # cor.weight=1
  mm=(upper+lower)/2; ss=0.2*(upper-lower)
  for (i in 1:length(probs))
    probs[i]=probs[i]*(1-cor.weight*mean(dnorm(pop[i,],mm,ss)/dnorm(mm,mm,ss)))
  return(probs)
}

# correction of the fitness used together with the F-narrowing repair
correction.narrow.F<-function(pop,probs,lower,upper,eval.pop,weight) {
  mm=(upper+lower)/2; ss=3*(upper-lower)/2
  for (i in 1:length(probs))
    probs=probs*(1-prod(dnorm(pop[i,],mm,ss)+0.002))
  return(probs)
}

# neutral correction - leave the fitness unchanged
correction.neutral<-function(pop,probs,lower,upper,eval.pop,weight) {
  return(probs)
}

# search for a feasible point on the line between the mutant and the
# fitness-weighted population middle point OK
repair.narrow.middlepoint<-function(pop,lower.limit,upper.limit,original,eval.original) {
  eo=eval.original-min(eval.original)
  eo=eo/sum(eo)
  middle=matrix(colSums(original*eo),dim(original)[1],dim(original)[2],byrow=TRUE)
  return(repair.narrow.F(pop,lower.limit,upper.limit,middle,eval.original))
}

# do nothing - just accept infeasible points OK
repair.neutral<-function(pop,lower.limit,upper.limit,original,eval.original) {
  return(pop)
}

# reject all infeasible coordinate values and replace them with the
# corresponding original values OK
repair.conservative.coordinates<-function(pop,lower.limit,upper.limit,original) {
  feasible=((pop>=lower.limit)&(pop<=upper.limit))
  return(feasible*pop+(1-feasible)*original)
}

# reject all infeasible points and replace them with their original points OK
repair.conservative<-function(pop,lower.limit,upper.limit,original) {
  feasible=matrix((rowMeans((pop>=lower.limit)&(pop<=upper.limit))==1),dim(pop)[1],dim(pop)[2])
  return(feasible*pop+(1-feasible)*original)
}

# return a logical vector which indicates feasibility of the points
which.feasible<-function(pop,lower.limit,upper.limit) {
  if (is.matrix(pop)) {
    feasible=(rowMeans((pop>=lower.limit)&(pop<=upper.limit))==1)
    return(feasible)
  } else {
    feasible=(mean((pop>=lower.limit)&(pop<=upper.limit))==1)
    return(feasible)
  }
}

# schedule of changes of the scaling factor F
plan.F<-function(FINIT,gen.no) {
  F=FINIT
  # an alternative schedule:
  # c=(abs(-4:3/10)+0.2)
  # F=c[(ceiling(gen.no/5)-1)%%8+1]*FINIT
  # print(F)
  return(F)
}

# single step of the DE/rand/1/bin strategy
DE.rand.1.bin.step<-function(pop,eval.pop,lower.limit,upper.limit,repair,
                             fitness,mu,F=1.2,pex=0.9) {
  selected=select.rand(mu)
  mutants=pop[selected,]+F*diff.vector(pop,mu,mu)
  mutants=repair(mutants,lower.limit,upper.limit,pop)
  crossed=cross.bin(pop,mutants,pex)
  eval.crossed=fitness(crossed)
  # indices of individuals for which the crossed-over mutant is not worse (zeros elsewhere)
  successors=(1:mu)*(repl.best(eval.pop,eval.crossed)-1)
  offs=pop; eval.offs=eval.pop
  offs[successors,]=crossed[successors,]
  eval.offs[successors]=eval.crossed[successors]
  return(list(offs=offs,eval.offs=eval.offs))
}

# DE/rand/1/bin: the full generational loop; returns the archive of all visited
# points together with their fitness values
DE.rand.1.bin<-function(lower.limit,upper.limit,fitness,initialize,
                        lower.init,upper.init,repair,
                        mu,F=1.2,pex=0.9,MAXGEN=100) {
  ndim=length(lower.limit)
  archive.points=matrix(NA,(MAXGEN+1)*mu,ndim)
  archive.fits=rep(NA,(MAXGEN+1)*mu)
  pop=initialize(lower.init,upper.init,mu)
  eval.pop=fitness(pop)
  for (gen.no in 1:MAXGEN) {
    archive.points[((gen.no-1)*mu)+(1:mu),]=pop
    archive.fits[((gen.no-1)*mu)+(1:mu)]=eval.pop
    n=DE.rand.1.bin.step(pop,eval.pop,lower.limit,upper.limit,repair,
                         fitness,mu,F,pex)
    pop=n$offs
    eval.pop=n$eval.offs
  }
  archive.points[(MAXGEN*mu)+(1:mu),]=pop
  archive.fits[(MAXGEN*mu)+(1:mu)]=eval.pop
  return(list(archive.points=archive.points,archive.fits=archive.fits))
}

# neutral encoding - identity
encode.neutral<-function(pop,lower.limit,upper.limit) {
  return(pop)
}
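# Illustrative usage added as a sketch ('sphere.fit', 'de.demo' and the parameter
# values below are assumptions, not part of the original experiments): run
# DE/rand/1/bin on a simple concave function whose maximum is at the origin.
sphere.fit=function(x) { return(-rowSums(x^2)) }
de.demo=DE.rand.1.bin(c(-5,-5),c(5,5),sphere.fit,init.unif,
                      c(-5,-5),c(5,5),repair.project,
                      mu=20,F=0.7,pex=0.9,MAXGEN=30)
print(max(de.demo$archive.fits,na.rm=TRUE))   # best value found (the maximum of -x^2 is 0)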
# encoding through arctan - maps the whole real line into the feasible box
encode.tan<-function(pop,lower.limit,upper.limit) {
  return((atan(pop)/pi+0.5)*(upper.limit-lower.limit)+lower.limit)
}

# Gaussian mutation using the covariance of the archive (CMA-ES-like sampling)
mutation.CMAES<-function(pop,number,archive,hor,P,F,vm) {
  C=eigen(cov(archive))
  # print(C)
  C$values=(C$values>0)*C$values   # clip negative eigenvalues to zero
  C.vectors=C$vectors; C.sqrt.values=sqrt(2*C$values)*F
  mutants=matrix(rnorm(number*dim(pop)[2]),number,dim(pop)[2])
  # print(mutants)
  for (i in 1:number)
    mutants[i,]=C.vectors%*%C.sqrt.values*mutants[i,]
  mutants=pop+mutants+sqrt(vm)*rnorm(mutants)
  # print(mutants)
  return(mutants)
}

# differential mutation by accumulation of P difference vectors
mutation.DE<-function(pop,number,archive,hor,P,F,vm) {
  dv=pop*0
  ak=matrix(runif(number*P),number,P)
  for (k in 1:P)
    dv=dv+diff.vector(archive,number,hor)*ak[,k]
  mutants=pop+F*dv/sqrt(rowSums(ak^2))+sqrt(vm)*rnorm(pop)
  # mutants=pop+F*dv/sqrt(rowSums(ak^2))*runif(pop,0,2)+sqrt(vm)*rnorm(pop)
  return(mutants)
}

# single step of the DMEA algorithm
DMEA.step<-function(pop,eval.pop,lower.limit,upper.limit,repair,mutation,
                    fitness,mu,F=1.2,P,vm=0.5,archive,eval.archive,cor.weight) {
  hor=dim(archive)[1]; ndim=dim(archive)[2]
  added=0
  offs=pop
  PATIENCE=1 # sqrt(ndim)+1
  while (added<mu) {
    # NOTE: the body of this loop was partly damaged in the source; the parent
    # selection below (modified fitness proportionate selection over the archive)
    # is a reconstruction and should be treated as an assumption
    selected=select.prop.mod(hor,eval.archive,mu-added,correction.general,
                             archive,lower.limit,upper.limit,cor.weight)
    mutants=mutation(archive[selected,],mu-added,archive,hor,P,F,vm)
    if (PATIENCE>0)
      feasible=which.feasible(mutants,lower.limit,upper.limit)
    else
      feasible=(1:(mu-added))*0+1
    sum.feas=sum(feasible)
    if (sum.feas>0) {
      if (is.matrix(mutants)) {
        offs[added+(1:sum.feas),]=mutants[feasible*(1:(mu-added)),]
      } else {
        offs[added+(1:sum.feas),]=mutants
      }
      added=added+sum.feas
    }
    PATIENCE=PATIENCE-1   # once the patience budget is used up, accept infeasible mutants
  }
  n1=mean(rowSums(offs-pop)^2)
  reps=repair(offs,lower.limit,upper.limit,pop,eval.pop)
  n2=mean(rowSums(reps-pop)^2)
  # print(n2/n1)
  # eval.reps=fitness(encode(reps,lower.limit,upper.limit))
  eval.reps=fitness(reps)
  return(list(offs=reps,eval.offs=eval.reps))
}

# the DMEA main loop; returns the archive of all visited points and their fitness
DMEA<-function(lower.limit,upper.limit,fitness,initialize,
               lower.init,upper.init,repair,mutation,
               mu=10,F=0.7,P=10,arch.past,MAXGEN=100) {
  MAXEVAL=MAXGEN*mu
  ndim=length(lower.limit)
  archive.points=matrix(NA,(MAXGEN+1)*(mu+2),ndim)
  archive.fits=(1:((MAXGEN+1)*(mu+2)))*NA
  n.eval=0
  while (n.eval<MAXEVAL) {
    # NOTE: the initialization below and the condition of the inner loop were
    # damaged in the source and are reconstructed here as a single run
    # (no restarts); cor.weight=1 is an assumed default
    pop=initialize(lower.init,upper.init,mu)
    eval.pop=fitness(pop)
    gen.no=1
    best.so.far=max(eval.pop)
    gen.best.so.far.improved=gen.no
    cor.weight=1
    while (n.eval<MAXEVAL) {
      # store the population and its mean point in the archive
      archive.points[((gen.no-1)*(mu+2))+(1:mu),]=pop
      archive.fits[((gen.no-1)*(mu+2))+(1:mu)]=eval.pop
      midPoint=colMeans(pop)
      archive.points[((gen.no-1)*(mu+2)+mu+1),]=midPoint
      archive.fits[((gen.no-1)*(mu+2)+mu+1)]=fitness(matrix(midPoint,1,ndim))
      n.eval=n.eval+mu+2
      # fitness proportionate weights over the archive window
      wf=archive.fits[(max(gen.no-arch.past,0)*(mu+2)+1):(gen.no*(mu+2)-2)]
      wf=wf-min(wf)
      if (sum(wf)>0) wf=wf/sum(wf) else wf=wf*0+1/length(wf)
      wMidPoint=colSums(archive.points[(max(gen.no-arch.past,0)*(mu+2)+1):(gen.no*(mu+2)-2),]*wf)
      archive.points[((gen.no-1)*(mu+2)+mu+2),]=wMidPoint
      archive.fits[((gen.no-1)*(mu+2)+mu+2)]=fitness(matrix(wMidPoint,1,ndim))
      if (gen.no<=MAXGEN) {
        n=DMEA.step(pop,eval.pop,lower.limit,upper.limit,repair,mutation,
                    fitness,mu,F,P,vm=0.0001,
                    archive.points[(max(gen.no-arch.past,0)*(mu+2)+1):(gen.no*(mu+2)),],
                    archive.fits[(max(gen.no-arch.past,0)*(mu+2)+1):(gen.no*(mu+2))],cor.weight)
      }
      pop=n$offs
      eval.pop=n$eval.offs
      if (max(eval.pop)>best.so.far) { best.so.far=max(eval.pop); gen.best.so.far.improved=gen.no }
      gen.no=gen.no+1
    }
    # archive.points[(MAXGEN*(mu+2))+(1:mu),]=pop
    # midPoint=colMeans(pop)
    # medPoint=apply(pop,2,'median')
    # archive.points[MAXGEN*(mu+2)+mu+1,]=midPoint
    # archive.points[MAXGEN*(mu+2)+mu+2,]=medPoint
    # archive.fits[((MAXGEN)*(mu+2))+(1:mu)]=eval.pop
    # archive.fits[(MAXGEN)*(mu+2)+mu+1]=fitness(matrix(midPoint,1,ndim))
    # archive.fits[(MAXGEN)*(mu+2)+mu+2]=fitness(matrix(medPoint,1,ndim))
  }
  return(list(archive.points=archive.points,archive.fits=archive.fits))
}
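# A minimal usage sketch of the accumulated-difference mutation used by DMEA
# (added for illustration; the 'arch.demo'/'base.demo'/'mut.demo' names and the
# parameter values are arbitrary assumptions, not part of the original experiments).
arch.demo=init.unif(c(-2,-2),c(2,2),20)        # an archive of 20 points in [-2,2]^2
base.demo=arch.demo[select.rand(20,5),]        # 5 parent points drawn from it
mut.demo=mutation.DE(base.demo,5,arch.demo,hor=20,P=4,F=0.7,vm=0.0001)
print(dim(mut.demo))                           # 5 mutants, one per parent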
# fitness function for testing purposes
# fitness=function(x){return(-rowSums((10*(x+1))^2)-3.9*rowSums((10*(x-1))^2))}

# NOTE - from here on this is test function code - it is not a part of the algorithm

# trzygauss ("three Gaussians"): a sum of three Gaussian peaks in 2D; the means
# and covariances are taken from the global variables set up by przygotuj()
trzygauss<-function(x,nwym=2) {
  x=t(matrix(x[,1:2],dim(x)[1],2))
  # x=t(x[,1:2])
  # covariance matrix C1
  # m1=c(-2,2)
  # C1=matrix(NA,2,2); C1[1,1]=6; C1[1,2]=C1[2,1]=2; C1[2,2]=1
  # e1=eigen(C1); E1=e1$vectors; V1=e1$values
  # e1=matrix(-0.3310069,2,2); e1[1,1]=e1[2,2]=-0.9436283; e1[1,2]=0.3310069
  # V1=c(6.7015621, 0.2984379)
  F1=dnorm(E1[,1]%*%(x-m1),sd=sqrt(V1[1]))*dnorm(E1[,2]%*%(x-m1),sd=sqrt(V1[2]))
  # return(F1)
  # covariance matrix C2
  # m2=c(0,-2)
  # m2=c(2,-2)
  # C2=matrix(NA,2,2); C2[1,1]=1; C2[1,2]=C2[2,1]=-1; C2[2,2]=4
  # e2=eigen(C2); E2=e2$vectors; V2=e2$values
  # V2=c(4.3027756,0.6972244);
  # e2=matrix(0.9570920,2,2); e2[1,1]=e2[2,2]=-0.2897841; e2[1,2]=-0.957092;
  F2=dnorm(E2[,1]%*%(x-m2),sd=sqrt(V2[1]))*dnorm(E2[,2]%*%(x-m2),sd=sqrt(V2[2]))
  # covariance matrix C3
  # m3=c(6,-6)
  # C3=matrix(NA,2,2); C3[1,1]=2; C3[1,2]=C3[2,1]=1; C3[2,2]=3
  # e3=eigen(C3); E3=e3$vectors; V3=e3$values
  F3=dnorm(E3[,1]%*%(x-m3),sd=sqrt(V3[1]))*dnorm(E3[,2]%*%(x-m3),sd=sqrt(V3[2]))
  # return(F1+0*F2+0*F3)
  # return(9*F1+50*F2+F3)
  return(F1+F2+3*F3)
  # alternative returns:
  # return(F1)
  # return(F2)
  # return(3*F3)
}

# przygotuj ("prepare"): set up the global means, covariance matrices and their
# eigendecompositions used by trzygauss
przygotuj<-function() {
  # covariance matrix C1
  m1<<-c(2,2)
  C1<<-matrix(NA,2,2); C1[1,1]<<-6; C1[1,2]<<-C1[2,1]<<-2; C1[2,2]<<-1
  e1<<-eigen(C1); E1<<-e1$vectors; V1<<-e1$values
  # e1=matrix(-0.3310069,2,2); e1[1,1]=e1[2,2]=-0.9436283; e1[1,2]=0.3310069
  # V1=c(6.7015621, 0.2984379)
  # covariance matrix C2
  m2<<-c(2,-2)
  C2<<-matrix(NA,2,2); C2[1,1]<<-1; C2[1,2]<<-C2[2,1]<<- -1; C2[2,2]<<-4
  e2<<-eigen(C2); E2<<-e2$vectors; V2<<-e2$values
  # V2=c(4.3027756,0.6972244);
  # e2=matrix(0.9570920,2,2); e2[1,1]=e2[2,2]=-0.2897841; e2[1,2]=-0.957092;
  # covariance matrix C3
  m3<<-c(6,-6)
  C3<<-matrix(NA,2,2); C3[1,1]<<-2; C3[1,2]<<-C3[2,1]<<-1; C3[2,2]<<-3
  e3<<-eigen(C3); E3<<-e3$vectors; V3<<-e3$values
}

# an alternative test setting with different means and covariances
przygotuj2<-function() {
  # covariance matrix C1
  m1<<-c(9,11)
  C1<<-matrix(NA,2,2); C1[1,1]<<-0.4; C1[1,2]<<-C1[2,1]<<-0; C1[2,2]<<-0.2
  e1<<-eigen(C1); E1<<-e1$vectors; V1<<-e1$values
  # e1=matrix(-0.3310069,2,2); e1[1,1]=e1[2,2]=-0.9436283; e1[1,2]=0.3310069
  # V1=c(6.7015621, 0.2984379)
  # covariance matrix C2
  # m2=c(0,-2)
  m2<<-c(-5,0)
  C2<<-matrix(NA,2,2); C2[1,1]<<-16; C2[1,2]<<-C2[2,1]<<- -8; C2[2,2]<<-40
  e2<<-eigen(C2); E2<<-e2$vectors; V2<<-e2$values
  # V2=c(4.3027756,0.6972244);
  # e2=matrix(0.9570920,2,2); e2[1,1]=e2[2,2]=-0.2897841; e2[1,2]=-0.957092;
  # covariance matrix C3
  m3<<-c(6,-6)
  C3<<-matrix(NA,2,2); C3[1,1]<<-0.2; C3[1,2]<<-C3[2,1]<<-0.1; C3[2,2]<<-0.3
  e3<<-eigen(C3); E3<<-e3$vectors; V3<<-e3$values
}

przygotuj()
fitness=trzygauss
# alternative fitness functions:
# fitness=function(x){return(-cos(6*pi*(rowSums(x^2)))-0.1*rowSums(x^2))}
# library(f11)
# initialize_benchmark(runif(1),30)
# fitness=function(x){
#   wynik=x[1,]
#   for (i in 1:dim(x)[1])
#     wynik[i]=compute_benchmark(x[i,])
#   return(wynik)
# }
# fitness=function(x){return(matrix(1))}

# run DMEA on the trzygauss landscape and plot the fitness of all archived points
set.seed(15)
ae=DMEA(t(c(-10,-10)),t(c(10,10)),trzygauss,init.unif,
        t(c(-10,-10)),t(c(10,10)),repair.project.narrow,mutation.DE,
        arch.past=10,F=0.5,MAXGEN=100)
plot(ae$archive.fits,type="l")
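# A possible way to inspect the run above (added as an illustration): read off
# the best archived point and its fitness.
best.idx=which.max(ae$archive.fits)
print(ae$archive.points[best.idx,])         # coordinates of the best point found
print(max(ae$archive.fits,na.rm=TRUE))      # its fitness value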