"""Variant of the HalfCheetahEnv with different target velocity."""
import numpy as np
from garage.envs.mujoco.half_cheetah_env_meta_base import HalfCheetahEnvMetaBase # noqa: E501


class HalfCheetahVelEnv(HalfCheetahEnvMetaBase):
    """Half-cheetah environment with target velocity, as described in [1].

    The code is adapted from
    https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand.py

    The half-cheetah follows the dynamics from MuJoCo [2], and receives at
    each time step a reward composed of a control cost and a penalty equal
    to the absolute difference between its current forward velocity and the
    target velocity. The tasks are generated by sampling the target
    velocities from the uniform distribution on [0, 2].

    [1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
        Meta-Learning for Fast Adaptation of Deep Networks", 2017
        (https://arxiv.org/abs/1703.03400)
    [2] Emanuel Todorov, Tom Erez, Yuval Tassa, "MuJoCo: A physics engine
        for model-based control", 2012
        (https://homes.cs.washington.edu/~todorov/papers/TodorovIROS12.pdf)

    Args:
        task (dict or None): A task dictionary with a single key,
            "velocity" (float), the target velocity, usually between 0
            and 2. Defaults to a target velocity of 0.

    """

    def __init__(self, task=None):
        super().__init__(task or {'velocity': 0.})
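
    # Worked example of the per-step reward computed in step() below, with
    # target velocity v* and measured forward velocity v:
    #     reward = -|v - v*| - 0.05 * sum(a ** 2)
    # e.g. (illustrative numbers) moving at v = 1.5 toward v* = 1.0 with
    # action a = 0 gives reward = -0.5; the same velocity error with
    # sum(a ** 2) = 1.0 gives reward = -0.55.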

    def step(self, action):
        """Take one step in the environment.

        Equivalent to step in HalfCheetahEnv, but with different rewards.

        Args:
            action (np.ndarray): The action to take in the environment.

        Returns:
            tuple:
                * observation (np.ndarray): The observation of the
                  environment.
                * reward (float): The reward acquired at this time step.
                * done (bool): Whether the environment was completed at
                  this time step. Always False for this environment.
                * infos (dict):
                    * reward_forward (float): The velocity-tracking reward,
                      i.e. the negative absolute deviation from the target
                      velocity, ignoring the control cost.
                    * reward_ctrl (float): The reward for acting, i.e. the
                      negated control cost (always non-positive).
                    * task_vel (float): Target velocity.
                      Usually between 0 and 2.

        """
        # Finite-difference estimate of the forward velocity over one
        # (frame-skipped) simulation step.
        xposbefore = self.sim.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        xposafter = self.sim.data.qpos[0]
        forward_vel = (xposafter - xposbefore) / self.dt
        # Penalize deviation from the target velocity and large actions.
        forward_reward = -1.0 * abs(forward_vel - self._task['velocity'])
        ctrl_cost = 0.5 * 1e-1 * np.sum(np.square(action))
        observation = self._get_obs()
        reward = forward_reward - ctrl_cost
        done = False
        infos = dict(reward_forward=forward_reward,
                     reward_ctrl=-ctrl_cost,
                     task_vel=self._task['velocity'])
        return observation, reward, done, infos
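
    # For illustration (hypothetical numbers): a step measuring a forward
    # velocity of 1.2 against a target of 1.0, with ctrl_cost = 0.03, yields
    # reward = -0.2 - 0.03 = -0.23 and
    # infos == {'reward_forward': -0.2, 'reward_ctrl': -0.03, 'task_vel': 1.0}.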

    def sample_tasks(self, num_tasks):
        """Sample a list of `num_tasks` tasks.

        Args:
            num_tasks (int): Number of tasks to sample.

        Returns:
            list[dict[str, float]]: A list of "tasks", where each task is
                a dictionary containing a single key, "velocity", mapping
                to a value between 0 and 2.

        """
        velocities = self.np_random.uniform(0.0, 2.0, size=(num_tasks, ))
        tasks = [{'velocity': velocity} for velocity in velocities]
        return tasks
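
    # Example (values are illustrative, since sampling is random):
    # sample_tasks(3) might return
    # [{'velocity': 0.31}, {'velocity': 1.87}, {'velocity': 0.94}].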

    def set_task(self, task):
        """Set the target-velocity task.

        The new task takes effect on the next call to step.

        Args:
            task (dict[str, float]): A task (a dictionary containing a
                single key, "velocity", usually between 0 and 2).

        """
        self._task = task
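

# A minimal usage sketch, not part of the original module. It assumes the
# MuJoCo dependencies are installed and that HalfCheetahEnvMetaBase exposes
# the standard gym-style reset()/step()/action_space API.
if __name__ == '__main__':
    env = HalfCheetahVelEnv()
    for task in env.sample_tasks(5):  # five target velocities in [0, 2]
        env.set_task(task)
        env.reset()
        reward = 0.
        # Roll out a few random actions and report the final step's reward.
        for _ in range(10):
            action = env.action_space.sample()
            observation, reward, done, infos = env.step(action)
        print('target velocity: {:.2f}, last reward: {:.2f}'.format(
            task['velocity'], reward))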