import gym
import torch
import numpy as np
import argparse
import time
from collections import deque
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal, kl_divergence
import os

# Windows: make the MuJoCo (mjpro150) DLLs visible to mujoco-py.
os.add_dll_directory("C:\\Users\\11339\\.mujoco\\mjpro150\\bin")

# References (Zhihu, in Chinese):
# What is the key to understanding Actor-Critic? (with code and analysis): https://zhuanlan.zhihu.com/p/110998399
# How to understand the Policy Gradient algorithm? (with code and explanation): https://zhuanlan.zhihu.com/p/110881517
# How to understand the PPO algorithm intuitively? [Theory]: https://zhuanlan.zhihu.com/p/111049450

lr_actor = 0.0003
lr_critic = 0.0003
Iter = 15000
MAX_STEP = 10000
gamma = 0.98
lambd = 0.98
batch_size = 64
epsilon = 0.2
l2_rate = 0.001
beta = 3


class Actor(nn.Module):
    def __init__(self, N_S, N_A):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(N_S, 64)
        self.fc2 = nn.Linear(64, 64)
        self.sigma = nn.Linear(64, N_A)
        self.mu = nn.Linear(64, N_A)
        # Shrink the initial mu weights by a further factor of 10 (in place) and zero the bias.
        self.mu.weight.data.mul_(0.1)
        self.mu.bias.data.mul_(0.0)
        # self.set_init([self.fc1, self.fc2, self.mu, self.sigma])
        # self.distribution = torch.distributions.Normal

    # Initialize network parameters.
    def set_init(self, layers):
        for layer in layers:
            nn.init.normal_(layer.weight, mean=0., std=0.1)
            nn.init.constant_(layer.bias, 0.)

    def forward(self, s):
        x = torch.tanh(self.fc1(s))
        x = torch.tanh(self.fc2(x))
        mu = self.mu(x)
        log_sigma = self.sigma(x)
        # log_sigma = torch.zeros_like(mu)
        sigma = torch.exp(log_sigma)
        return mu, sigma

    def choose_action(self, s):
        mu, sigma = self.forward(s)
        # Pi = self.distribution(mu, sigma)
        Pi = Normal(mu, sigma)
        return Pi.sample().numpy()


# Critic network
class Critic(nn.Module):
    def __init__(self, N_S):
        super(Critic, self).__init__()
        self.fc1 = nn.Linear(N_S, 64)
        self.fc2 = nn.Linear(64, 64)
        self.fc3 = nn.Linear(64, 1)
        self.fc3.weight.data.mul_(0.1)
        self.fc3.bias.data.mul_(0.0)
        # self.set_init([self.fc1, self.fc2, self.fc3])

    def set_init(self, layers):
        for layer in layers:
            nn.init.normal_(layer.weight, mean=0., std=0.1)
            nn.init.constant_(layer.bias, 0.)

    def forward(self, s):
        x = torch.tanh(self.fc1(s))
        x = torch.tanh(self.fc2(x))
        values = self.fc3(x)
        return values
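
# The sketch below is illustrative only and is never called by this script. With
# hypothetical sizes (4-dim state, 2-dim action) it shows how the Actor's
# diagonal-Gaussian head is used: sample an action, then sum the per-dimension
# log-probabilities into the joint log-density that Ppo.train() later
# exponentiates to form the importance-sampling ratio.
def _demo_policy_log_prob():
    demo_actor = Actor(4, 2)                         # hypothetical N_S=4, N_A=2
    s = torch.zeros(1, 4)                            # dummy state batch of size 1
    mu, sigma = demo_actor(s)                        # per-dimension mean and std
    pi = Normal(mu, sigma)
    a = pi.sample()
    log_prob = pi.log_prob(a).sum(1, keepdim=True)   # joint log-density, shape (1, 1)
    return a, log_prob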

class Ppo:
    def __init__(self, N_S, N_A):
        self.actor_net = Actor(N_S, N_A)
        self.critic_net = Critic(N_S)
        self.actor_optim = optim.Adam(self.actor_net.parameters(), lr=lr_actor)
        self.critic_optim = optim.Adam(self.critic_net.parameters(), lr=lr_critic, weight_decay=l2_rate)
        self.critic_loss_func = torch.nn.MSELoss()

    def train(self, memory):
        memory = np.array(memory, dtype=object)
        states = torch.tensor(np.vstack(memory[:, 0]), dtype=torch.float32)
        actions = torch.tensor(list(memory[:, 1]), dtype=torch.float32)
        rewards = torch.tensor(list(memory[:, 2]), dtype=torch.float32)
        masks = torch.tensor(list(memory[:, 3]), dtype=torch.float32)

        # Evaluate V for these N steps; GAE turns the TD errors into advantages,
        # which are then fed back to the actor.
        critic_net_values = self.critic_net(states)
        caculate_all_v, actor_advants = self.get_gae(rewards, masks, critic_net_values)
        # returns, advants = self.get_gae(rewards, critic_net_values)

        old_mu, old_std = self.actor_net(states)
        # pi = self.actor_net.distribution(old_mu, old_std)
        old_pi = Normal(old_mu, old_std)
        # print("old_pi:", old_pi)
        # Log-probability of the stored actions under the old policy (exp recovers the density), e.g.
        # tensor([[-10.2622], [-9.5560], [-9.4539], ..., [-8.7547], [-8.2148], [-12.0093]], grad_fn=<SumBackward1>)
        old_log_prob = old_pi.log_prob(actions).sum(1, keepdim=True)
        # print("old_log_prob:", old_log_prob)

        n = len(states)
        arr = np.arange(n)  # 0 .. n-1
        for epoch in range(1):
            np.random.shuffle(arr)
            # Split into roughly 50 mini-batches of 64 samples each.
            for i in range(n // batch_size):
                b_index = arr[batch_size * i:batch_size * (i + 1)]
                b_states = states[b_index]
                batch_actor_advants = actor_advants[b_index].unsqueeze(1)
                b_actions = actions[b_index]
                b_caculate_v = caculate_all_v[b_index].unsqueeze(1)

                mu, std = self.actor_net(b_states)
                # pi = self.actor_net.distribution(mu, std)
                new_pi = Normal(mu, std)
                # Log-probability of the same actions under the current policy.
                new_prob = new_pi.log_prob(b_actions).sum(1, keepdim=True)
                # Matching old log-probabilities for this mini-batch.
                old_prob = old_log_prob[b_index].detach()

                # KL-divergence penalty (unused; the clipped objective is used instead).
                # KL_penalty = self.kl_divergence(old_mu[b_index], old_std[b_index], mu, std)

                critic_values = self.critic_net(b_states)
                # Early values are tiny ([-1.9531e-03], [-6.4709e-04], [1.3893e-03], ...);
                # later they grow to around [22.8951], [23.0203], ...
                # critic_loss = self.critic_loss_func(critic_values, b_returns)
                # Supervised regression of the critic towards b_caculate_v: as critic_loss
                # shrinks, the critic estimates V accurately. critic_advantage starts around
                # 17160 and drops to ~103; the targets start around -120 and end around 20,
                # e.g. [5.9501], [16.8177].
                critic_advantage = critic_values - b_caculate_v
                # print("critic_advantage:", critic_advantage)
                critic_loss = torch.mean(torch.square(critic_advantage))
                # with open('C:\\Users\\11339\\Desktop\\1\\temp.txt', 'a+', encoding='utf-8') as f:
                #     print("critic_values{}".format(critic_values), "b_caculate_v{}".format(b_caculate_v),
                #           "critic_loss{}".format(critic_loss), file=f)
                self.critic_optim.zero_grad()
                critic_loss.backward()
                self.critic_optim.step()

                # Importance-sampling ratio: lets data collected under the old policy be
                # reused to update the current actor.
                ratio = torch.exp(new_prob - old_prob)
                # Actor gradient via the (clipped) surrogate objective.
                surrogate_loss = ratio * batch_actor_advants
                clipped_ratio = torch.clamp(ratio, 1.0 - epsilon, 1.0 + epsilon)
                clipped_loss = clipped_ratio * batch_actor_advants
                actor_loss = -torch.min(surrogate_loss, clipped_loss).mean()
                # actor_loss = -(surrogate_loss - beta * KL_penalty).mean()
                # with open('C:\\Users\\11339\\Desktop\\1\\temp2.txt', 'a+', encoding='utf-8') as f:
                #     print("ratio{}".format(ratio), "surrogate_loss{}".format(surrogate_loss),
                #           "clipped_loss{}".format(clipped_loss), "actor_loss{}".format(actor_loss), file=f)
                self.actor_optim.zero_grad()
                actor_loss.backward()
                self.actor_optim.step()

    # KL divergence between the old and the new Gaussian policies
    # (calls torch.distributions.kl_divergence imported at the top).
    def kl_divergence(self, old_mu, old_sigma, mu, sigma):
        old_mu = old_mu.detach()
        old_sigma = old_sigma.detach()
        pi_old = Normal(old_mu, old_sigma)
        pi_new = Normal(mu, sigma)
        kl = kl_divergence(pi_old, pi_new)
        kl_mean = torch.mean(kl)
        '''
        kl = torch.log(old_sigma) - torch.log(sigma) + (old_sigma.pow(2) + (old_mu - mu).pow(2)) / \
            (2.0 * sigma.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)
        '''
        return kl_mean

    # Generalized Advantage Estimation (GAE).
    def get_gae(self, rewards, masks, cri_net_values):
        rewards = torch.Tensor(rewards)
        masks = torch.Tensor(masks)
        caculate_allstep_v = torch.zeros_like(rewards)  # zeros with the same shape as rewards
        advants = torch.zeros_like(rewards)

        current_step_v = 0
        previous_value = 0
        running_advants = 0

        for t in reversed(range(0, len(rewards))):
            # 1. Discounted return (Q estimate) for each step.
            #    Later on current_step_v grows large (e.g. ~100); even with gamma = 0.98 the
            #    discount removes about 2, which roughly offsets rewards[t], so the value
            #    settles near r / (1 - gamma). When masks[t] == 0 (episode boundary) the tail
            #    is zeroed and the accumulation restarts.
            current_step_v = rewards[t] + gamma * current_step_v * masks[t]
            caculate_allstep_v[t] = current_step_v
            # with open('C:\\Users\\11339\\Desktop\\1\\temp.txt', 'a+', encoding='utf-8') as f:
            #     print(" t {}".format(t), current_step_v, "rewards{}".format(rewards[t]),
            #           "masks{}".format(masks[t]), file=f)

            # 2. Advantage of the action: ADV = Q - V, i.e. the TD error r + gamma * V(s_{t+1}) - V(s_t).
            #    The advantages are fed back to the actor; the actor keeps adjusting the policy
            #    (and hence the actions and rewards) until the critic's TD error approaches 0.
            running_tderror = rewards[t] + gamma * previous_value * masks[t] - \
                              cri_net_values.data[t]
            previous_value = cri_net_values.data[t]  # becomes V(s_{t+1}) for the next (earlier) step
            # Accumulate the advantage, damped by the gamma and lambd hyperparameters.
            running_advants = running_tderror + gamma * lambd * \
                              running_advants * masks[t]
            advants[t] = running_advants
            # with open('C:\\Users\\11339\\Desktop\\1\\temp3.txt', 'a+', encoding='utf-8') as f:
            #     print("rewards{}:".format(t), rewards[t], " cri_net_values.data:{}".format(cri_net_values.data[t]),
            #           " running_tderror:{}".format(running_tderror),
            #           " running_advants{}:".format(running_advants), file=f)
            # Sample log output:
            #   rewards0:    tensor(-0.9311)  cri_net_values.data: tensor([31.0485])  running_tderror: tensor([-1.2627])   running_advants: tensor([4.5656])
            #   rewards2590: tensor(-1.5822)  cri_net_values.data: tensor([16.6860])  running_tderror: tensor([-18.2682])  running_advants: tensor([-18.2682])
            # The TD error starts out large and gradually shrinks; running_advants stands in for
            # the Q-value, so it is accumulated over steps but controlled by the gamma * lambd factor.

        # Advantage normalization (optional):
        # advants = (advants - advants.mean()) / advants.std()
        # print("rewards", rewards)
        # caculate_allstep_v: discounted returns accumulated from the last step back to the first,
        #   e.g. [-40.8928, -39.5801, -40.4270, ..., -1.0499, -1.3989, -2.0225]
        # print("advants", advants)
        # advants: deviation from V (positive or negative),
        #   e.g. [0.5809, 0.6834, 0.6090, ..., 2.7913, 2.7713, 2.6205]
        return caculate_allstep_v, advants
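
# Illustrative sketch (never called by training): the GAE recursion implemented in
# Ppo.get_gae, run on a hypothetical 3-step trajectory with all critic values set to 0,
#   delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
#   A_t     = delta_t + gamma * lambd * A_{t+1} * mask_t
# With zero values this gives A_2 = 1, A_1 = 1 + gamma * lambd, A_0 = 1 + gamma * lambd * A_1.
def _demo_gae_recursion():
    demo_rewards = [1.0, 1.0, 1.0]   # hypothetical rewards
    demo_values = [0.0, 0.0, 0.0]    # pretend the critic outputs 0 everywhere
    demo_masks = [1.0, 1.0, 0.0]     # the episode ends after the last step
    advants = [0.0, 0.0, 0.0]
    next_value, running_advant = 0.0, 0.0
    for t in reversed(range(3)):
        delta = demo_rewards[t] + gamma * next_value * demo_masks[t] - demo_values[t]
        running_advant = delta + gamma * lambd * running_advant * demo_masks[t]
        advants[t] = running_advant
        next_value = demo_values[t]
    return advants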

parser = argparse.ArgumentParser()
parser.add_argument('--env_name', type=str, default="Ant-v3",
                    help='name of Mujoco environment')
args = parser.parse_args()
env = gym.make(args.env_name)

N_S = env.observation_space.shape[0]
N_A = env.action_space.shape[0]

# Fix the random seeds.
env.seed(500)
torch.manual_seed(500)
np.random.seed(500)


# Running normalization of the state.
class Nomalize:
    def __init__(self, N_S):
        self.mean = np.zeros((N_S,))
        self.std = np.zeros((N_S,))
        self.stdd = np.zeros((N_S,))
        self.n = 0

    # The instance can be called like a function.
    def __call__(self, x):
        x = np.asarray(x)
        self.n += 1
        # print("---self.n_____", self.n)  # increases by 1 on every call
        if self.n == 1:
            self.mean = x
        else:
            # Online update of the sample mean and variance.
            old_mean = self.mean.copy()
            # print("---old_mean", old_mean)
            self.mean = old_mean + (x - old_mean) / self.n
            # print("-----self.mean:", self.mean)  # running mean
            self.stdd = self.stdd + (x - old_mean) * (x - self.mean)
        # Standard deviation: self.stdd accumulates the squared deviations over the
        # N steps seen so far, so divide by (n - 1) before taking the square root.
        if self.n > 1:
            self.std = np.sqrt(self.stdd / (self.n - 1))
        else:
            self.std = self.mean
        x = x - self.mean
        x = x / (self.std + 1e-8)
        x = np.clip(x, -5, +5)
        return x
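
# Illustrative sketch (never called by training): Nomalize above is Welford's online
# mean/variance update, so after n calls its running statistics match the batch mean
# and the (n - 1)-denominator standard deviation of everything it has seen.
# The input numbers below are hypothetical.
def _demo_nomalize_welford():
    demo_norm = Nomalize(1)
    demo_data = np.array([[1.0], [2.0], [4.0], [8.0]])
    for x in demo_data:
        demo_norm(x)  # the normalized return value is ignored; only the statistics matter here
    assert np.allclose(demo_norm.mean, demo_data.mean(axis=0))
    assert np.allclose(demo_norm.std, demo_data.std(axis=0, ddof=1))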

ppo = Ppo(N_S, N_A)
nomalize = Nomalize(N_S)
eva_episodes = 0
# Episode counter for the data-collection loop; accumulated to show how many episodes
# make up one iteration.
episodes = 0
# Step counter controlling the data-collection loop (< 2048), so roughly 3000 steps are
# gathered per iteration. Early on each episode lasts only a few steps, so many episodes
# are batched together; once the agent survives the environment's 1000-step limit, inner_
# first reaches 999, the total exceeds 2048, the while loop exits and training starts.
# The threshold is therefore tied to the environment's 1000-step truncation.
steps = 0
# Innermost counter: steps within one episode.
inner_ = 0
# Each block of roughly 3000 collected steps is one iteration.
iter = 0
target_x = 0

# Iter = 15000
for iter in range(Iter):
    memory = deque()
    scores = []
    steps = 0
    t0 = time.time()
    episodes = 0
    # Horizon: only start a training update once this many steps have been collected;
    # otherwise keep appending to memory.
    while steps < 2048:
        # print("*" * 10, steps)
        episodes += 1
        # print("episodes:", episodes)
        # print("steps:", steps)
        # Normalize the initial state.
        s = nomalize(env.reset())
        last_x = 0
        last_Y = 0
        score = 0
        # print("inner_---", inner_)
        # inner_ = 0
        # MAX_STEP is 10000, but Ant is truncated after 1000 steps, so even a good policy
        # runs at most 999 steps per episode; while the policy is still poor, several short
        # episodes are needed before the 2048-step horizon is reached.
        ep_reward = 0
        # MAX_STEP = 1000
        for inner_ in range(MAX_STEP):
            steps += 1
            # Choose an action.
            a = ppo.actor_net.choose_action(
                torch.from_numpy(np.array(s).astype(np.float32)).unsqueeze(0))[0]
            s_, r1, done, info = env.step(a)
            # ---info---- example:
            # {'reward_forward': 0.6021526736379457,
            #  'reward_ctrl': -1.05205392837524427104922580199391,
            #  'reward_contact': -0.0013451591993611572,
            #  'reward_survive': 1.0,
            #  'x_position': 3.181932573738666,
            #  'y_position': 1.4739629694915288,
            #  'distance_from_origin': 3.506745177404748,
            #  'x_velocity': -0.6021526736379457,
            #  'y_velocity': 0.3332341023264185,
            #  'forward_reward': -0.6021526736379457}
            up_dis = (info["y_position"] - last_Y)
            # right_dis = (info["x_position"] - last_x)
            # print("----up_dis---", up_dis)
            # Redefine r as the reward for moving in the +y direction.
            r = up_dis * 20 + info["reward_survive"] + info["reward_ctrl"] + info["reward_contact"]
            # r2 = right_dis * 20 + info["reward_survive"] + info["reward_ctrl"] + info["reward_contact"]
            ep_reward = ep_reward + r
            # print("r----", r)
            '''
            print(info)
            if (inner_ != 0) and (r2 - r1) > 0.01:
                print("--right_dis--", right_dis)
                print("-------r1----", r1)
                print("-----------", r2 - r1)
            '''
            last_Y = info["y_position"]
            last_x = info["x_position"]
            # env.render()
            # Normalize the next state.
            s_ = nomalize(s_)
            mask = (1 - done) * 1
            if inner_ != 0:  # skip the first step after a reset (no valid previous x/y position)
                memory.append([s, a, r, mask])
            score += r
            s = s_
            if done:
                break
        with open('log_' + args.env_name + '.txt', 'a') as outfile:
            outfile.write('\t' + str(episodes) + '\t' + str(score) + '\n')
        # print("score------", score)  # later on: three episode scores summed / averaged
        # print("steps------", steps)
        scores.append(score)
    # Early on an iteration may need ~30 short episodes to reach 2048 steps; later only
    # about three episodes of ~1000 steps each, so average / sum over them.
    score_avg = np.mean(scores)
    score_sum = np.sum(scores)
    # print("---iter---:", iter)
    # print('v30,{} episode score_avg is {:.2f}'.format(episodes, score_avg))
    print('v30,{} episode score_sum is {:.2f}'.format(episodes, score_sum))
    print(
        'Episode: {}/iter {} | Episode Reward: {:.4f} | Running Time: {:.4f}'.format(
            episodes, iter, ep_reward, time.time() - t0)
    )
    if iter % 200 == 0:
        torch.save(ppo.actor_net.state_dict(), './model/ppo_actor_{}'.format(iter))
        torch.save(ppo.critic_net.state_dict(), './model/ppo_critic_{}'.format(iter))
    # Parameter update every fixed number of collected timesteps; memory holds at most
    # ~3000 transitions (e.g. ~2000 steps collected, plus one more episode of up to 1000 steps).
    print("@" * 10, memory.__len__())
    ppo.train(memory)
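
# Illustrative sketch (never called above): reload one of the saved actor checkpoints and
# act greedily by taking the Gaussian mean instead of sampling. The checkpoint path and
# episode count are hypothetical; note that calling nomalize() here keeps updating the
# running state statistics.
def _demo_eval_checkpoint(checkpoint_path='./model/ppo_actor_0', n_episodes=1):
    eval_actor = Actor(N_S, N_A)
    eval_actor.load_state_dict(torch.load(checkpoint_path))
    eval_actor.eval()
    for _ in range(n_episodes):
        s = nomalize(env.reset())
        total_reward = 0.0
        for _ in range(MAX_STEP):
            with torch.no_grad():
                mu, _sigma = eval_actor(
                    torch.tensor(np.asarray(s), dtype=torch.float32).unsqueeze(0))
            s_, r, done, _info = env.step(mu[0].numpy())
            total_reward += r
            s = nomalize(s_)
            if done:
                break
        print('eval episode reward: {:.2f}'.format(total_reward))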