main_test_ddpg.py
import os.path
import gym
import torch
import torch.nn as nn
import pygame  # used by Gym's classic-control "human" render mode
import numpy as np
# Set the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device type: ", device)
# Initialize the environment
env = gym.make(id='Pendulum-v1', render_mode="human")
STATE_DIM = env.observation_space.shape[0]
ACTION_DIM = env.action_space.shape[0]
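# For Pendulum-v1 the observation is 3-dimensional (cos(theta), sin(theta), angular velocity)
# and the action is a single continuous torque in [-2, 2].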
# Load the trained actor checkpoint
current_path = os.path.dirname(os.path.realpath(__file__))
model_dir = current_path + '/models/'
actor_path = model_dir + "ddpg_actor_20241121163358.path"
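# Note: the timestamped filename above is whatever the training script saved;
# point actor_path at your own checkpoint if it differs.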
class Actor(nn.Module):
    def __init__(self, state_dim, action_dim, hidden_dim=64):
        super(Actor, self).__init__()
        self.fc1 = nn.Linear(state_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        # tanh bounds the output to [-1, 1]; scale by 2 to match Pendulum's torque range [-2, 2]
        x = torch.tanh(self.fc3(x)) * 2
        return x
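# Note: this architecture must match the actor used during training,
# otherwise load_state_dict below will fail with missing/unexpected keys.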
actor = Actor(STATE_DIM, ACTION_DIM).to(device)
actor.load_state_dict(torch.load(actor_path, map_location=device))
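# Optional: put the policy network in evaluation mode before inference
actor.eval()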
NUM_EPISODE = 30
NUM_STEP = 200

for episode_i in range(NUM_EPISODE):
    state, _ = env.reset()
    episode_reward = 0

    for step_i in range(NUM_STEP):
        # Greedy action from the trained actor (no exploration noise at test time)
        action = actor(torch.FloatTensor(state).unsqueeze(0).to(device)).detach().cpu().numpy()[0]
        next_state, reward, done, truncation, info = env.step(action)
        state = next_state
        episode_reward += reward
        if done or truncation:
            break

    print(f"Episode: {episode_i}, Reward: {episode_reward}")

env.close()
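# To run this test script (assuming a checkpoint exists under ./models/ and a display
# is available for the "human" render mode):
#   python main_test_ddpg.py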