`
brinado
  • 浏览: 19566 次
  • 性别: Icon_minigender_1
  • 来自: 大连
最近访客 更多访客>>
社区版块
存档分类
最新评论

神经网络和遗传算法(NN&GA)

    博客分类:
  • ruby
阅读更多
在软计算课程上,老师让我们编程实现Hopfield网、联想记忆、BM、BP、SOFM、遗传算法。正在学习Ruby,就用ruby实现了上述内容。上述实现的内容比较简单,但没有对每个程序的输入和输出做说明。如果有任何疑问可以留言。希望和大家交流!


Hopfield

# ---------------------------------------------------------------------------
# Hopfield network demo (4 neurons).
# Builds a random symmetric weight matrix and random thresholds, enumerates
# all 16 bipolar (+1/-1) states, prints each state's energy, then relaxes
# every state by asynchronous random updates and prints the trajectory
# (states encoded as 4-bit numbers) plus the final energy.
# ---------------------------------------------------------------------------

# Symmetric 4x4 weight matrix; the diagonal stays at the Array.new default 0,
# the upper triangle gets a random value in [-1.0, 1.0], and the lower
# triangle mirrors it (w[j][i] was already filled on an earlier row).
w = Array.new(4)
4.times do |i|
  w[i] = Array.new(4,0)
  4.times do |j|
    if j > i
      w[i][j] = (rand 101) / 100.0 * (-1) ** (rand 2)
    else
      w[i][j]=w[j][i]
    end
  end
end

# Per-neuron firing thresholds, random in [-1.0, 1.0].
u=Array.new(4)
4.times do |i|
  u[i] = (rand 101) / 100.0 * (-1) ** (rand 2)
end

# Enumerate all 16 bipolar 4-vectors by binary counting: each row copies the
# previous one and "increments" it from the right, with -1 playing the role
# of binary 0.
target = Array.new(16)
16.times do |i|
  target[i] = Array.new(4)

  if i == 0
    target[0] = [-1,-1,-1,-1]
    next
  end
  if i > 0
    4.times do |k|
      t = target[i-1][k]
      target[i][k] = t
    end
    # Ripple-carry from the least-significant (rightmost) position.
    4.times do |j|
      ttt = target[i-1][3-j]
      if ttt == -1
        target[i][3-j] = 1
        break
      else
        target[i][3-j] = -1
      end
    end
  end
end

# Print every state's energy:
#   E = -1/2 * sum_ij w[i][j]*s[i]*s[j] + sum_i u[i]*s[i]
16.times do |k|
  energy = 0
  4.times do |i|
    4.times do |j|
      energy += -0.5*w[i][j]*target[k][i]*target[k][j]
    end
    energy+=u[i]*target[k][i]
  end
  puts target[k].join(" ") +"\t\t"+ energy.to_s
end

# Asynchronous relaxation: starting from each of the 16 states, perform 30
# single-neuron updates at random positions, logging the state after each
# step as a 4-bit code (-1 counts as bit 0), then print the final energy.
16.times do |i|
  tem = ""
   puts "\n"
  4.times do |k|
    tem += target[i][k].to_s + " "
  end
  puts tem
  tem = ""
  total = 0

  30.times do |j|
    # Encode the current state as an integer in 0..15 for the trace line.
    total = 0
    4.times do |k|
      kk = target[i][k]
      if kk == 1
        total += 2 ** (3-k)
      end
    end
    # Pad one-digit codes so the trace columns line up.
    if total/10 == 1
      tem += total.to_s + "  "
    else
      tem += " " + total.to_s + " "
    end

    # Update one randomly chosen neuron from the sign of its local field.
    total=0
    rRound = rand 4
    4.times do |k|
      total += w[rRound][k] * target[i][k]
    end
    if total-u[rRound] >= 0
    target[i][rRound] = 1
    else
      target[i][rRound] = -1
    end
  end

  # Energy of the relaxed state.
  total = 0
  4.times do |j|
    4.times do |k|
      total += -0.5 * w[j][k] * target[i][j] * target[i][k]
    end
    total += u[j] * target[i][j]
  end
  tem += "   " + total.to_s
  puts tem
end






BM算法

# ---------------------------------------------------------------------------
# Boltzmann machine demo (4 neurons, simulated annealing).
# ---------------------------------------------------------------------------

# initial: starting bipolar state; t counts annealing steps, n is the
# current "temperature".
initial=[1,-1,1,-1]
t,n=1,1000

# Symmetric random 4x4 weight matrix with zero diagonal (same construction
# as in the Hopfield section above).
w = Array.new(4)
4.times do |i|
  w[i] = Array.new(4,0)
  4.times do |j|
    if j > i
      w[i][j] = (rand 101) / 100.0 * (-1) ** (rand 2)
    else
      w[i][j]=w[j][i]
    end
  end
end

# Per-neuron thresholds, random in [-1.0, 1.0].
u=Array.new(4)
4.times do |i|
  u[i] = (rand 101) / 100.0 * (-1) ** (rand 2)
end

# Sigmoid acceptance probability of a unit with local field tt at
# "temperature" mm: 1 / (1 + e^(-tt/mm)).
# NOTE(review): this definition shadows Kernel#p for the rest of the script.
def p(tt, mm)
  boltzmann = Math.exp(-tt / mm)
  1.0 / (1 + boltzmann)
end

# Annealing loop: pick a random neuron, compute its local field hi, and fire
# it deterministically when hi > 0, otherwise stochastically with sigmoid
# probability p(hi, n). The temperature follows n = 1000/(1+t) (integer
# division), so the loop terminates once n reaches 0.
while n>0.01
select=rand 4
hi=0
  4.times do |j|
    hi+=w[select][j]*initial[j]
  end
hi-=u[select]
if hi>0
  initial[select]=1
else
  a=p(hi,n)

  # Random acceptance threshold: drawn from [0, 0.699] while hot (n > 10),
  # but biased up into [0.5, 0.699] once the system has cooled.
  if n>10
    b=(rand 700)/1000.0
  else
    b=0.5+(rand 200)/1000.0
  end

  if a > b
    initial[select]=1
  else
    initial[select]=-1
  end
end
n=1000/(1+t)
t+=1
end

# Print the annealed state and its energy.
total=0
4.times do |i|
  4.times do |j|
    total+=-0.5*w[i][j]*initial[i]*initial[j]
  end
  total+=u[i]*initial[i]
end
puts initial.join("\t")+"\t\t"+total.to_s
puts "\n"

# Verbatim repeat of the Hopfield demo from the top of the file (state
# enumeration, per-state energy, asynchronous relaxation), re-run with this
# section's w and u so the annealed result can be compared against the full
# energy landscape.

# All 16 bipolar 4-vectors by binary counting (-1 plays the role of 0).
target = Array.new(16)
16.times do |i|
  target[i] = Array.new(4)

  if i == 0
    target[0] = [-1,-1,-1,-1]
    next
  end
  if i > 0
    4.times do |k|
      t = target[i-1][k]
      target[i][k] = t
    end
    4.times do |j|
      ttt = target[i-1][3-j]
      if ttt == -1
        target[i][3-j] = 1
        break
      else
        target[i][3-j] = -1
      end
    end
  end
end

# Energy of every state.
16.times do |k|
  energy = 0
  4.times do |i|
    4.times do |j|
      energy += -0.5*w[i][j]*target[k][i]*target[k][j]
    end
    energy+=u[i]*target[k][i]
  end
  puts target[k].join(" ") +"\t\t"+ energy.to_s
end

# Relax each start state with 30 random single-neuron updates, logging the
# visited states as 4-bit codes, then print the final energy.
16.times do |i|
  tem = ""
   puts "\n"
  4.times do |k|
    tem += target[i][k].to_s + " "
  end
  puts tem
  tem = ""
  total = 0

  30.times do |j|
    total = 0
    4.times do |k|
      kk = target[i][k]
      if kk == 1
        total += 2 ** (3-k)
      end
    end
    if total/10 == 1
      tem += total.to_s + "  "
    else
      tem += " " + total.to_s + " "
    end

    total=0
    rRound = rand 4
    4.times do |k|
      total += w[rRound][k] * target[i][k]
    end
    if total-u[rRound] >= 0
    target[i][rRound] = 1
    else
      target[i][rRound] = -1
    end
  end

  total = 0
  4.times do |j|
    4.times do |k|
      total += -0.5 * w[j][k] * target[i][j] * target[i][k]
    end
    total += u[j] * target[i][j]
  end
  tem += "   " + total.to_s
  puts tem
end






BP算法

#BP Algorithm endeavor
# Logistic sigmoid activation: maps any real t into (0, 1).
# Fix: dropped the useless local `m` -- the expression is already the
# method's implicit return value.
def f(t)
  1.0 / (1.0 + Math.exp(-t))
end

# Network hyper-parameters: 8 training patterns, 3 inputs, 20 hidden units,
# 2 outputs, 200 epochs, learning rate affix = 0.5.
num_mode=8
num_input_nerve=3
num_middle_nerve=20
num_output_nerve=2
num_learntimes=200
affix=0.5

# d: desired outputs for each pattern, random in [0, 1].
d=Array.new(num_mode)
num_mode.times do |i|
  d[i]=Array.new(num_output_nerve)
  num_output_nerve.times do |j|
    d[i][j]=(rand 101)/100.0
  end
end

# input: training patterns, random in [0, 1].
input=Array.new(num_mode)
num_mode.times do |i|
  input[i]=Array.new(num_input_nerve)
  num_input_nerve.times do |j|
    input[i][j]=(rand 101)/100.0
  end
end

# v: input -> hidden weights, random in [-1.0, 1.0].
v=Array.new(num_input_nerve)
num_input_nerve.times do |i|
  v[i]=Array.new(num_middle_nerve)
  num_middle_nerve.times do |j|
    v[i][j]=(rand 101)/100.0*(-1)**(rand 2)
  end
end

# r: hidden-layer thresholds.
r=Array.new(num_middle_nerve)
num_middle_nerve.times do |i|
  r[i]=(rand 101)/100.0*(-1)**(rand 2)
end

# Placeholder only; re-created inside every training/evaluation pass.
middle=Array.new()

# w: hidden -> output weights, random in [-1.0, 1.0].
w=Array.new(num_middle_nerve)
num_middle_nerve.times do |i|
  w[i]=Array.new(num_output_nerve)
  num_output_nerve.times do |j|
    w[i][j]=(rand 101)/100.0*(-1)**(rand 2)
  end
end

# s: output-layer thresholds.
# NOTE(review): sized num_middle_nerve (20) although only the first
# num_output_nerve (2) entries are ever read or written below.
s=Array.new(num_middle_nerve)
num_middle_nerve.times do |i|
  s[i]=(rand 101)/100.0*(-1)**(rand 2)
end

# Placeholder only; re-created inside every training/evaluation pass.
output=Array.new()

#starts learning
# Online back-propagation: for every epoch, for every pattern k, run a
# forward pass and then update all weights/thresholds by gradient descent.
num_learntimes.times do |g|
  num_mode.times do |k|

    middle=Array.new(num_middle_nerve,0)   # hidden activations
    output=Array.new(num_output_nerve,0)   # output activations
    help_w=Array.new(num_output_nerve,0)   # output-layer deltas
    help_v=Array.new(num_middle_nerve,0)   # hidden-layer deltas

    #figure out results
    # Forward pass: hidden = f(input.v - r), output = f(middle.w - s).
    num_middle_nerve.times do |i|
      num_input_nerve.times do |j|
        middle[i]+=input[k][j]*v[j][i]
      end
      middle[i]=f(middle[i]-r[i])
    end
    num_output_nerve.times do |i|
      num_middle_nerve.times do |j|
        output[i]+=middle[j]*w[j][i]
      end
      output[i]=f(output[i]-s[i])
    end

    #adjustment
    # Deltas: help_w = (d - o) o (1-o); help_v back-propagates help_w
    # through the *pre-update* w (correct BP ordering) and scales by the
    # hidden sigmoid derivative.
    num_output_nerve.times do |i|
      help_w[i]=(d[k][i]-output[i])*output[i]*(1-output[i])
    end
    num_middle_nerve.times do |i|
      num_output_nerve.times do |j|
        help_v[i]+=help_w[j]*w[i][j]
      end
      help_v[i]*=middle[i]*(1-middle[i])
    end

    # Gradient-descent updates with learning rate affix; thresholds move
    # opposite to the deltas because activation is f(net - threshold).
    num_middle_nerve.times do |i|
      num_output_nerve.times do |j|
        w[i][j]+=affix*help_w[j]*middle[i]
      end
    end
    num_output_nerve.times do |i|
      s[i]-=affix*help_w[i]
    end

    num_input_nerve.times do |i|
      num_middle_nerve.times do |j|
        v[i][j]+=affix*help_v[j]*input[k][i]
      end
    end

    num_middle_nerve.times do |i|
      r[i]-=affix*help_v[i]
    end

  end
  #end learn 1 time
end

#test on learning times
# Evaluation pass: forward-propagate each pattern, accumulate the squared
# error against d, and print the network outputs (assumes exactly 2 outputs).
errorsum=0
puts "results:"
num_mode.times do |k|
  middle=Array.new(num_middle_nerve,0)
  output=Array.new(num_output_nerve,0)
  num_middle_nerve.times do |i|
    num_input_nerve.times do |j|
      middle[i]+=input[k][j]*v[j][i]
    end
    middle[i]=f(middle[i]-r[i])
  end
  num_output_nerve.times do |i|
    num_middle_nerve.times do |j|
      output[i]+=middle[j]*w[j][i]
    end
    output[i]=f(output[i]-s[i])
    errorsum+=(d[k][i]-output[i])**2
  end
  puts "%.2f" %output[0]+"  "+"%.2f" %output[1]
end

# Conventional 1/2 factor of the sum-of-squares error.
errorsum/=2
puts "total deviation:"+errorsum.to_s

# Continue training for 100 more epochs. The loop body below is a verbatim
# repeat of the first training loop (forward pass, deltas, updates).
num_learntimes=100
#starts learning
num_learntimes.times do |g|
  num_mode.times do |k|

    middle=Array.new(num_middle_nerve,0)
    output=Array.new(num_output_nerve,0)
    help_w=Array.new(num_output_nerve,0)
    help_v=Array.new(num_middle_nerve,0)

    #figure out results
    num_middle_nerve.times do |i|
      num_input_nerve.times do |j|
        middle[i]+=input[k][j]*v[j][i]
      end
      middle[i]=f(middle[i]-r[i])
    end
    num_output_nerve.times do |i|
      num_middle_nerve.times do |j|
        output[i]+=middle[j]*w[j][i]
      end
      output[i]=f(output[i]-s[i])
    end

    #adjustment
    num_output_nerve.times do |i|
      help_w[i]=(d[k][i]-output[i])*output[i]*(1-output[i])
    end
    num_middle_nerve.times do |i|
      num_output_nerve.times do |j|
        help_v[i]+=help_w[j]*w[i][j]
      end
      help_v[i]*=middle[i]*(1-middle[i])
    end

    num_middle_nerve.times do |i|
      num_output_nerve.times do |j|
        w[i][j]+=affix*help_w[j]*middle[i]
      end
    end
    num_output_nerve.times do |i|
      s[i]-=affix*help_w[i]
    end

    num_input_nerve.times do |i|
      num_middle_nerve.times do |j|
        v[i][j]+=affix*help_v[j]*input[k][i]
      end
    end

    num_middle_nerve.times do |i|
      r[i]-=affix*help_v[i]
    end

  end
  #end learn 1 time
end

#test on learning times
# Second evaluation pass (verbatim repeat of the first): print outputs and
# the halved sum-of-squares error after the extra 100 epochs.
errorsum=0
puts "results:"
num_mode.times do |k|
  middle=Array.new(num_middle_nerve,0)
  output=Array.new(num_output_nerve,0)
  num_middle_nerve.times do |i|
    num_input_nerve.times do |j|
      middle[i]+=input[k][j]*v[j][i]
    end
    middle[i]=f(middle[i]-r[i])
  end
  num_output_nerve.times do |i|
    num_middle_nerve.times do |j|
      output[i]+=middle[j]*w[j][i]
    end
    output[i]=f(output[i]-s[i])
    errorsum+=(d[k][i]-output[i])**2
  end
  puts "%.2f" %output[0]+"  "+"%.2f" %output[1]
end

errorsum/=2
puts "total deviation:"+errorsum.to_s

# Train for a further 100 epochs (num_learntimes is re-assigned the same
# value it already holds). Loop body is again a verbatim repeat.
num_learntimes=100
#starts learning
num_learntimes.times do |g|
  num_mode.times do |k|

    middle=Array.new(num_middle_nerve,0)
    output=Array.new(num_output_nerve,0)
    help_w=Array.new(num_output_nerve,0)
    help_v=Array.new(num_middle_nerve,0)

    #figure out results
    num_middle_nerve.times do |i|
      num_input_nerve.times do |j|
        middle[i]+=input[k][j]*v[j][i]
      end
      middle[i]=f(middle[i]-r[i])
    end
    num_output_nerve.times do |i|
      num_middle_nerve.times do |j|
        output[i]+=middle[j]*w[j][i]
      end
      output[i]=f(output[i]-s[i])
    end

    #adjustment
    num_output_nerve.times do |i|
      help_w[i]=(d[k][i]-output[i])*output[i]*(1-output[i])
    end
    num_middle_nerve.times do |i|
      num_output_nerve.times do |j|
        help_v[i]+=help_w[j]*w[i][j]
      end
      help_v[i]*=middle[i]*(1-middle[i])
    end

    num_middle_nerve.times do |i|
      num_output_nerve.times do |j|
        w[i][j]+=affix*help_w[j]*middle[i]
      end
    end
    num_output_nerve.times do |i|
      s[i]-=affix*help_w[i]
    end

    num_input_nerve.times do |i|
      num_middle_nerve.times do |j|
        v[i][j]+=affix*help_v[j]*input[k][i]
      end
    end

    num_middle_nerve.times do |i|
      r[i]-=affix*help_v[i]
    end

  end
  #end learn 1 time
end

#test on learning times
# Final evaluation: print the network outputs, then the desired outputs
# ("want to be"), and last the halved sum-of-squares error.
errorsum=0
puts "results:"
num_mode.times do |k|
  middle=Array.new(num_middle_nerve,0)
  output=Array.new(num_output_nerve,0)
  num_middle_nerve.times do |i|
    num_input_nerve.times do |j|
      middle[i]+=input[k][j]*v[j][i]
    end
    middle[i]=f(middle[i]-r[i])
  end
  num_output_nerve.times do |i|
    num_middle_nerve.times do |j|
      output[i]+=middle[j]*w[j][i]
    end
    output[i]=f(output[i]-s[i])
    errorsum+=(d[k][i]-output[i])**2
  end
  puts "%.2f" %output[0]+"  "+"%.2f" %output[1]
end
# Desired outputs for side-by-side comparison.
puts "want to be:"
num_mode.times do |k|
  puts "%.2f" %d[k][0]+"  "+"%.2f" %d[k][1]
end
errorsum/=2
puts "total deviation:"+errorsum.to_s


联想记忆(Hopfield)
# Render the first 15 entries of aa as a tab-terminated row: the value 1
# prints as " 1", anything else prints as "-1". Returns the built string.
def ppp(aa)
  cells = Array.new(15) { |i| aa[i] == 1 ? " 1" : "-1" }
  cells.map { |c| c + "\t" }.join
end

# Probabilistically copy 1s from bb into cc: wherever bb[i] == 1, draw two
# coin flips and set cc[i] = 1 unless both come up 1 -- i.e. with
# probability 3/4, since (x + y) / 2 in integer arithmetic is 0 for sums
# 0 and 1. Positions where bb[i] != 1 are left untouched. Returns cc.
def change(bb, cc)
  15.times do |i|
    next unless bb[i] == 1
    cc[i] = 1 if (rand(2) + rand(2)) / 2 == 0
  end
  cc
end

# ---------------------------------------------------------------------------
# Associative-memory (Hopfield) demo on 15-unit bipolar patterns.
# ---------------------------------------------------------------------------

# Random thresholds in [-1.0, 1.0].
u=Array.new(15)
15.times do |i|
  u[i]=(rand 101)/100.0*(-1)**(rand 2)
end

# Symmetric 15x15 weight matrix with zero diagonal; the upper triangle is
# random in [-1.0, 1.0] and the lower triangle mirrors it.
w = Array.new(15,0)
15.times do |i|
  w[i] = Array.new(15,0)
  15.times do |j|
    if j > i
      w[i][j]=(rand 101)/100.0*(-1)**(rand 2)
    else
      w[i][j]=w[j][i]
    end
  end
end

# inital status
# Random bipolar start state.
ini_status = Array.new(15,0)
15.times do |i|
  if (rand 2) == 0
    ini_status[i] = -1
  else
    ini_status[i] = 1
  end
end
puts "Forehead status",ppp(ini_status)

# Relax the start state with 120 random asynchronous single-unit updates;
# the result acts as the "memorised" attractor state.
120.times do |i|
  now = rand 15
  tt = 0
  15.times do |j|
    tt += w[now][j] * ini_status[j]
  end
  if tt-u[now] >= 0
    ini_status[now] = 1
  else
    ini_status[now] = -1
  end
end
puts "\nMemory status(->forehead status)",ppp(ini_status)

# Energy of the memorised state (weight term only; thresholds are not
# included in this report).
temp = 0
15.times do |i|
  15.times do |j|
    temp += -0.5 * w[i][j] * ini_status[i] * ini_status[j]
  end
end
# NOTE(review): "Engergy" is a typo for "Energy" in the output text.
puts "Engergy is" + temp.to_s

# target: a 3x5 dot-matrix pattern of the letter "A" we want to recall.
target = [-1,1,-1,1,-1,1,1,1,1,1,-1,1,1,-1,1]
puts "\nA letter target(what we want to become)",ppp(target)

# test: a corrupted probe pattern. NOTE(review): the Array.new value is
# immediately replaced by the literal on the next line.
test = Array.new(15,-1)
test = [-1,-1,-1,1,-1,1,-1,-1,-1,1,-1,-1,-1,-1,-1]
puts "\nTest object(we want it to become A letter)",ppp(test)
simulate = Array.new(15,0)

# Build the recall probe: flip the memorised state at every position where
# the probe disagrees with the target, keep it elsewhere.
15.times do |i|
  if test[i] != target[i]
    simulate[i] = -1 * ini_status[i]
  else
    simulate[i] = ini_status[i]
  end
end
puts "\n#Hopfield new initial status(->memory status(generated by above 3))",ppp(simulate)

# Relax the probe with the same weights and thresholds.
120.times do |i|
  now = rand 15
  tt = 0
  15.times do |j|
    tt += w[now][j] * simulate[j]
  end
  if tt-u[now] >= 0
    simulate[now] = 1
  else
    simulate[now] = -1
  end
end
puts "\nFinal status(->hopfield new initial status(by the same w[]))",ppp(simulate)

# Energy of the relaxed probe (same weight-only formula as above).
temp = 0
15.times do |i|
  15.times do |j|
    temp += -0.5 * w[i][j] * simulate[i] * simulate[j]
  end
end
puts "Engergy is" + temp.to_s

# Did the probe relax back to the memorised state?
judge = "Y"
15.times do |i|
  if ini_status[i] != simulate[i]
    judge = "N"
    puts "\nMemory lost!!!"
    break
  end
end
if judge == "Y"
  puts "\nWe learnt it!! "
end


SOFM网(输入为sin曲线。实现了点的文件生成,再用C语言画图,ruby实在不容易实现画图,除非在Mac系统里)
#SOM another try!
#by Brinado on Apr. 17th, 2008

#initialization
#initialization
# num_mode sample points over one sine period; a 10x10 map (NOTE(review):
# num_matrix is never referenced -- the code below hard-codes 100); 60
# epochs; learning rate ita; neighbourhood width sigma.
num_mode=80
num_matrix=10
num_learn=60
ita=0.5
sigma=1

#input -- Math.sin
# input[i] = [x, y] with x in [0, 1) and y = sin scaled into [0, 1].
# NOTE(review): the 2.times loop rewrites both entries on every pass
# (j is unused) -- redundant but harmless.
input=Array.new(num_mode)
num_mode.times do |i|
  input[i]=Array.new(2)
  2.times do |j|
    input[i][0]=1.0*i/num_mode
    input[i][1]=Math.sin(Math::PI*2.0*i/num_mode)/2+0.5
  end
end

#w
#w
# 100 map units, each with a 2-D weight vector initialised near the centre
# (0.5, 0.5) plus a small signed random jitter of up to 0.099.
# Fix: `(rand 100)/1000` was integer division, which is always 0, so every
# weight started at exactly 0.5 and the jitter was never applied; dividing
# by 1000.0 produces the intended perturbation.
w=Array.new(100)
100.times do |i|
  w[i]=Array.new(2)
  2.times do |j|
    w[i][j]=0.5+(rand 100)/1000.0*(-1)**(rand 2)
  end
end

#output
#output
# NOTE(review): `output` is filled here but never read again, and the first
# assignment in the inner loop is immediately overwritten by the second, so
# both columns end up holding the raw sine value. Looks like leftover code.
output=Array.new(num_mode)
num_mode.times do |i|
  output[i]=Array.new(2)
  2.times do |j|
    output[i][j]=1.0*i/num_mode
    output[i][j]=Math.sin(Math::PI*2.0*i/num_mode)
  end
end

# SOFM training: each epoch presents every input, finds its best-matching
# unit (BMU), and pulls all units towards the input with a distance-decayed
# neighbourhood factor; sigma shrinks a little every epoch.
num_learn.times do |g|
  num_mode.times do |k|
    # NOTE(review): `distance` is filled but never read, and `b` is unused.
    distance=Array.new(100)
    a,b,tt3=0,0,999
    # BMU search: a = index of the unit with the smallest Euclidean
    # distance to input k.
    100.times do |i|
        tt=Math.sqrt((input[k][0]-w[i][0])**2+(input[k][1]-w[i][1])**2)
        #puts tt
        distance[i]=tt
      if tt<tt3
        a,tt3=i,tt
      end
    end

    # Neighbourhood update. NOTE(review): the map has 100 units (10x10),
    # so the row/column decomposition should presumably use /10 and %10
    # rather than /9 and %9 -- confirm intent. tta2 (sigma**2) is computed
    # but the exponent divides by sigma, not sigma**2 -- also suspicious;
    # both are left unchanged here.
    100.times do |i|
      2.times do |j|
        tta1,tta2=(a/9-i/9)**2+(a%9-i%9)**2,sigma**2
        w[i][j]+=ita*Math.exp(-1.0*tta1/sigma)*(input[k][j]-w[i][j])
      end
    end

    #insertion here...
  end
  # Shrink the neighbourhood width each epoch, clamped at 0.005.
  # Fix: `sigma==0.005` was a no-op comparison; it must be an assignment,
  # otherwise sigma simply freezes at whatever value first drops below
  # the floor.
  if sigma<0.005
     sigma=0.005
  else
     sigma-=0.005
  end
end


# Dump the trained weights as 200 space-separated fixed-point numbers into
# db.txt for the external C plotting program.
# NOTE(review): the C program below opens "db2.txt" -- presumably the file
# is renamed/copied by hand; confirm.
txt = ""
f = File.open("db.txt", "w")
100.times do |i|
    txt = "%.15f" %w[i][0] + " " + "%.15f" %w[i][1] + " "
    f.write(txt)
end
f.close


SOFM网C语言画图实现
/*
 * Plot the trained SOFM weights with Turbo C BGI graphics: read 200
 * fixed-width decimal numbers (100 x/y weight pairs) from db2.txt, scale
 * them to screen coordinates and draw a small pixel cross per weight
 * vector inside a full-screen border.
 */
#include<stdio.h>
#include<stdlib.h>
#include<graphics.h>
#define MM 19

main()
{
 char ch[MM];
 int i=0,j=0;
 int object[200];
 int graphdriver=VGA;
 int graphmode=VGAHI;

 FILE *f;
 /* Fix: the path string contained leftover forum markup ("[b]...[/b]")
  * pasted into the literal, so fopen could never find the file. */
 if((f=fopen("c:/turboc2/brinado/db2.txt","r"))==NULL)
 {
  /* NOTE(review): message says db.txt but the file opened is db2.txt. */
  printf("db.txt open error.\n");
  exit(0);
 }
 /* Each record is "0.xxx..."; ch[2..4] are the first three fractional
  * digits, combined into 0..99.9 and scaled by 4 to screen range. */
 while(i<200)
 {
  fgets(ch,MM,f);
  object[i]=(int)((ch[2]-'0')*10+ch[3]-'0'+(ch[4]-'0')*0.1)*4;
  i++;
 }
 fclose(f);


 initgraph(&graphdriver,&graphmode," ");
 cleardevice();
 /*
 setcolor(RED);
 for(j=0;j<200;j+=2)
 {
  if(j==18||j==38||j==58||j==78||j==98||j==118||j==138||j==158||j==178||j==198)
  {
   if(j!=198){line(object[j],object[j+1],object[j+20],object[j+21]);}
   continue;
  }
  line(object[j],object[j+1],object[j+2],object[j+3]);
  if(j<180)
  {
   line(object[j],object[j+1],object[j+20],object[j+21]);
  }
 }
 */
 /* Draw each weight vector as a centre pixel (colour 2) with four
  * neighbour pixels (colour 1). */
 for(j=0;j<200;j+=2)
 {
  putpixel(object[j],object[j+1],2);
  putpixel(object[j]+1,object[j+1],1);
  putpixel(object[j],object[j+1]+1,1);
  putpixel(object[j],object[j+1]-1,1);
  putpixel(object[j]-1,object[j+1],1);
 }
 setcolor(WHITE);
 rectangle(0,0,639,479);
 getch();
 closegraph();
}


GA-遗传算法
#target:[0 0 0 0 0] [1 1 1 1 1]

# Fitness of a 10-gene chromosome against the target [0 0 0 0 0 1 1 1 1 1]:
# one point for every 0 among the first five genes and every 1 among the
# last five. A perfect chromosome scores 10.
def godbenchmark(god)
  hits = 0
  10.times do |idx|
    wanted = idx < 5 ? 0 : 1
    hits += 1 if god[idx] == wanted
  end
  hits # return fitness value
end

# Single-point crossover, in place: genes at index >= position are swapped
# between chromosomes a and b. position can range over 0..10; position == 0
# exchanges the two chromosomes completely, position >= 10 changes nothing.
def crossover(a,b,position)
  10.times do |idx|
    a[idx], b[idx] = b[idx], a[idx] if idx >= position
  end
end

#initialize chromosome
# Population of 6 chromosomes with 10 random binary genes each.
chromosome=Array.new(6)
6.times do |i|
  chromosome[i]=Array.new(10,0)
  10.times do |j|
    if (rand 2)==1
      chromosome[i][j]=1
    end
  end
end

#parameters
#p_crossover=0.4
#p_mutation=3/60
#num_chromosome=6
#num_gene=10
# step counts generations actually run; record logs the best fitness per
# generation.
iterates=50
step=0
record=Array.new(50)

# init[i] = [chromosome index, fitness of that chromosome].
init=Array.new(6,0)
6.times do |i|
  init[i]=Array.new(2)
  init[i][0]=i
  init[i][1]=godbenchmark(chromosome[i])
end
#pop ranking
# Hand-rolled bubble-style pass that appears intended to order init by
# descending fitness (best first). NOTE(review): verify it fully sorts in
# all cases before relying on more than the front entries.
ttt,t1=init[0][1],0
6.times do |i|
  for j in 0..(5-i)
    if j==0
      ttt,t1=init[0][1],0
    end
    if init[j][1] > ttt
      mm1,mm2=init[j][0],init[j][1]
      init[j][0],init[j][1]=init[t1][0],init[t1][1]
      init[t1][0],init[t1][1]=mm1,mm2
      ttt,t1=init[j][1],j
    else
      ttt,t1=init[j][1],j
    end
  end
end

#select using tournament method
# Elitist truncation: slot 0 copies the ranked best, slots 1..5 copy ranks
# 0..4 -- so the best chromosome appears twice and the worst is dropped.
middle=Array.new(6)
6.times do |i|
  middle[i]=Array.new(10)
  if i==0
    10.times do |j|
      middle[i][j]=chromosome[init[i][0]][j]
    end
  else
    10.times do |j|
      middle[i][j]=chromosome[init[i-1][0]][j]
    end
  end
end
chromosome=middle

#iteration begins...
# Each generation: pairwise crossover (probability 0.4), point mutation
# (probability 0.15 per chromosome), fitness ranking, elitist selection,
# then record the best fitness and stop early on a perfect score of 10.
iterates.times do |k|
  #crossover begin
  # Fixed pairing 0-5, 1-4, 2-3; each pair crosses at a random point when
  # (rand 10)/10.0 >= 0.6, i.e. with probability 0.4.
  3.times do |i|
    if (rand 10)/10.0 >= 0.6
      case i
        when 0
          crossover chromosome[0],chromosome[5],(rand 10) #must change anyway!!!
        when 1
          crossover chromosome[1],chromosome[4],(rand 10)
        when 2
          crossover chromosome[2],chromosome[3],(rand 10)
      end
    end
  end
  #crossover end

  #mutation begin
  # With probability 0.15, flip one randomly chosen gene per chromosome.
  6.times do |i|
    percentage=0.85
    if (rand 100)/100.0 >= percentage
      tt=rand 10
      if chromosome[i][tt]==0
        chromosome[i][tt]=1
      else
        chromosome[i][tt]=0
      end
    end
  end
  #mutation end

  #selection begin
  # Re-evaluate fitness; init[i] = [chromosome index, fitness].
  init=Array.new(6,0)
  6.times do |i|
    init[i]=Array.new(2)
    init[i][0]=i
    init[i][1]=godbenchmark(chromosome[i])
  end
  #pop ranking
  # Same bubble-style descending-fitness pass as before the loop.
  ttt,t1=init[0][1],0
  6.times do |i|
    for j in 0..(5-i)
      if j==0
        ttt,t1=init[0][1],0
      end
      if init[j][1] > ttt
        mm1,mm2=init[j][0],init[j][1]
        init[j][0],init[j][1]=init[t1][0],init[t1][1]
        init[t1][0],init[t1][1]=mm1,mm2
        ttt,t1=init[j][1],j
      else
        ttt,t1=init[j][1],j
      end
    end
  end
  #select using tournament method
  # Elitist truncation: best kept twice, worst dropped (as in the
  # pre-loop selection).
  middle=Array.new(6)
  6.times do |i|
    middle[i]=Array.new(10)
    if i==0
      10.times do |j|
        middle[i][j]=chromosome[init[i][0]][j]
      end
    else
      10.times do |j|
        middle[i][j]=chromosome[init[i-1][0]][j]
      end
    end
  end
  chromosome=middle
  #select end

  # Log the best fitness of this generation; stop once it is perfect.
  record[step]=godbenchmark(chromosome[0])
  step+=1
  #exit condition
  if godbenchmark(chromosome[0])==10
    break
  end
end

#print initial chromosome
# Build and print the final population plus the per-generation fitness log.
# NOTE(review): `print` here is a local String variable that shadows
# Kernel#print for the remainder of the script.
print="--Final Result--\n"
6.times do |i|
  10.times do |j|
    print+=chromosome[i][j].to_s+"\t"
  end
  print+="\n"
end
puts print
puts "\n*Note:this result is the No.#{step} generation's reproduction!","\n*Step change(fitness result) in detail:\n","\n"
step.times do |i|
  puts "No.#{i} step best: "+record[i].to_s
end
分享到:
评论

相关推荐

    遗传算法神经网络,遗传算法神经网络区别,matlab

    遗传算法神经网络是一种结合了遗传算法(Genetic Algorithm, GA)与神经网络(Neural Network, NN)的优化技术,广泛应用于复杂问题的求解,如模式识别、预测分析、函数优化等。在MATLAB环境中,可以利用其强大的...

    Matlab机器人控制的仿真使用遗传算法优化神经网络-机器人控制的仿真,使用遗传算法优化神经网络.rar

    遗传算法则可以通过内置的Global Optimization Toolbox实现,该工具箱提供了实现遗传算法的函数,如`ga`,可以方便地与神经网络训练集成。 在这个项目中,我们首先定义了机器人控制问题的具体目标,比如最小化跟踪...

    神经网络遗传算法极值寻优

    总之,“神经网络遗传算法极值寻优”是结合了两种强大的优化策略,旨在解决非线性优化问题,特别是对于具有多模态和高维度特征的问题。MATLAB作为一种强大的科学计算环境,为实现这一过程提供了丰富的工具和支持。...

    自己总结的有关遗传算法和神经网络算法的相关知识附-遗传算法与神经网络相结合的算法集合.rar

    遗传算法和神经网络的结合,即遗传神经网络(Genetic Programming with Neural Networks,GPNN),旨在利用GA的全局搜索能力来优化神经网络的结构和参数。这种结合可以解决神经网络训练过程中可能出现的局部极小值...

    C++写的遗传算法优化神经网络的源程序

    在IT领域,遗传算法(Genetic Algorithm, GA)和神经网络(Neural Network, NN)是两种非常重要的技术。遗传算法是一种模拟自然选择和遗传机制的优化方法,而神经网络则是模仿人脑神经元结构的计算模型,常用于解决...

    基于遗传算法的神经网络算法

    将遗传算法应用于神经网络的训练过程中,可以实现对网络结构和权重的并行优化。首先,神经元的连接权重和网络结构可以被编码为个体的基因串。接着,通过遗传算法的迭代过程,种群中的优秀网络结构会被保留下来,同时...

    基于神经网络和遗传算法的工业机器人不均匀表面抛光.pdf

    为了克服这一问题,研究者提出了基于神经网络(Neural Network, NNW)和遗传算法(Genetic Algorithm, GA)的工业机器人不均匀表面抛光算法。 神经网络是一种模仿生物神经系统的结构和功能的信息处理系统,通过大量...

    遗传算法神经网络,遗传算法神经网络区别,matlab源码.zip

    遗传算法可以应用于神经网络的权重和偏置参数优化,通过遗传算法的搜索能力找到最佳参数组合,提升神经网络的性能。 遗传算法神经网络的主要区别在于它们的角色和应用范围。遗传算法主要是一种全局优化工具,适用...

    遗传算法神经网络,遗传算法神经网络区别,matlab源码.rar

    遗传算法神经网络(GA-NN)结合了两者的优点,通常用于神经网络的参数优化。在训练神经网络时,GA负责优化权重和阈值,而不是传统的梯度下降法。GA的全局搜索能力可以避免陷入局部最优,尤其是在权重空间维度高、...

    GA-BP-matlab-遗传算法优化的神经网络预测.rar

    在现代数据分析与预测领域,遗传算法(Genetic Algorithm, GA)和神经网络(Neural Network, NN)是两种常用且强大的工具。本资料包“GA-BP-matlab-遗传算法优化的神经网络预测.rar”主要介绍了如何利用MATLAB实现...

    模拟退火,遗传算法,神经网络程序

    综上所述,这个MATLAB程序包含了模拟退火、遗传算法和神经网络的实现,可用于解决优化和预测问题。通过这些算法的结合,我们可以解决复杂问题,如组合优化、函数拟合和模式识别等。在实际应用中,用户可以根据具体...

    神经网络 遗传算法 matlab

    这种方法被称为遗传算法优化的神经网络(GA-NN),在许多实际问题中表现出优越性能,例如在模式识别、预测模型构建、控制系统设计等方面。 例如,在神经网络的训练过程中,初始权重的设定对网络性能有很大影响。...

    遗传神经网络算法

    遗传算法(Genetic Algorithm, GA)源自生物进化论,模拟了自然选择、遗传和突变的过程,而神经网络(Neural Network, NN)则是一种模仿人脑神经元结构的计算模型,用于学习和预测。将两者结合,可以利用遗传算法的...

    遗传算法与神经网络模型源代码

    遗传算法(Genetic Algorithms, GA)与神经网络(Neural Networks, NN)是两种在人工智能领域广泛应用的计算方法。它们分别模拟了生物进化过程和人脑神经元的连接方式,来解决复杂问题。 遗传算法是一种优化技术,...

    毕业设计MATLAB_使用神经网络和遗传算法的乒乓球人工智能.zip

    2. pingpong_GA_NN.m:这可能是整个项目的主函数或核心算法,结合了遗传算法(GA)和神经网络(NN),用于乒乓球AI的训练和决策过程。 3. run.m:这可能是一个启动或运行整个程序的脚本,调用其他子函数以执行整个...

    遗传算法神经网络资料文档大全

    9. "GAbp.rar":这个文件很可能包含了一个用遗传算法优化BP神经网络的完整代码库,可以帮助读者实践和理解遗传算法对BP网络的优化过程。 10. "用遗传算法优化BP神经网络的Matlab编程实例.txt":提供了具体的Matlab...

    基于遗传算法优化的神经网络预测代码

    **基于遗传算法优化的神经网络预测** 神经网络(Neural ...为了更好地理解和应用这个代码,建议熟悉MATLAB编程以及遗传算法和神经网络的基本概念。同时,对数据预处理、模型验证和参数调整的理解也是至关重要的。

    遗传算法小波神经网络

    遗传算法(Genetic Algorithm, GA)与小波神经网络(Wavelet Neural Network, WNN)是两种在计算科学和人工智能领域广泛应用的技术。本篇将详细阐述这两种技术的原理及其结合应用。 遗传算法是一种受到生物进化过程...

    遗传算法优化神经网络代码,保证可用。

    在神经网络领域,遗传算法被广泛应用于权值和阈值的优化,以提高网络的性能和准确性。标题中的"遗传算法优化神经网络代码,保证可用"表明这是一个使用遗传算法对神经网络模型的参数进行优化的实现,确保了代码的可...

Global site tag (gtag.js) - Google Analytics