This repository has been archived by the owner on Jan 1, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 75
/
Copy path: demo_sawyer_gripper_env.py
71 lines (54 loc) · 1.91 KB
/
demo_sawyer_gripper_env.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import gym
import sawyer_gripper_env # noqa: F401
class GraspingPolicy(torch.nn.Module):
    """Hard-coded, time-indexed policy that reaches, grasps, and lifts an object.

    The behavior is a fixed schedule driven by an internal step counter ``t``:
      t <  50 : hover above the object with the gripper open
      t < 100 : descend linearly from hover height to grasp height
      t < 150 : close the gripper and apply gripping force
      t < 220 : lift straight up by a fixed increment per step
      else    : hold the grasp in place
    """

    def __init__(self, env):
        super().__init__()
        self.env = env
        # Step counter; advances once per forward() call that receives states.
        self.t = 0

    def forward(self, states=None):
        """Return the next action for the given observed states.

        With no states yet (e.g. before the first env step), the default
        action from the env's action space is returned unchanged and the
        step counter does not advance.
        """
        action = self.env.action_space.new()
        if not states:
            return action

        # Motion constants (heights in meters, force in Newtons).
        hover_z, grasp_z = 0.4, 0.05
        lift_step = 0.02
        open_width, closed_width = 0.11, 0.05
        grip_force = 20

        step = self.t
        if step < 50:
            # Phase 1: hover above the object, gripper open, pointing down.
            action.end_effector.position = states.object.position + [0, 0, hover_z]
            action.end_effector.orientation = [0.0, 1, 0.0, 0.0]
            action.gripper_width = open_width
        elif step < 100:
            # Phase 2: interpolate downward from hover height to grasp height.
            frac = (step - 50) / 50
            height = hover_z - frac * (hover_z - grasp_z)
            action.end_effector.position = states.object.position + [0, 0, height]
        elif step < 150:
            # Phase 3: close the gripper on the object with force.
            action.gripper_width = closed_width
            action.gripper_force = grip_force
        elif step < 220:
            # Phase 4: lift by a fixed increment while keeping the grasp.
            action.end_effector.position = states.robot.end_effector.position + [0, 0, lift_step]
            action.gripper_width = closed_width
            action.gripper_force = grip_force
        else:
            # Phase 5: hold the grasp.
            action.gripper_width = closed_width

        self.t += 1
        return action
def main():
    """Run the hard-coded grasping policy in the sawyer-gripper environment."""
    env = gym.make("sawyer-gripper-v0")
    print(f"Env observation space: {env.observation_space}")
    env.reset()

    # Policy that maps the latest observation to the next action.
    policy = GraspingPolicy(env)

    # No observation before the first step; loop until the episode ends.
    obs, done = None, False
    while not done:
        env.render()
        obs, reward, done, info = env.step(policy(obs))

    env.close()


if __name__ == "__main__":
    main()