Commit

Update
mjlbach committed Apr 12, 2022
1 parent 5eff8c2 commit d0b212d
Showing 11 changed files with 68 additions and 76 deletions.
eval.py: 31 changes (16 additions & 15 deletions)
@@ -1,12 +1,14 @@
import re
from pathlib import Path

import cv2
import hydra
from omegaconf import OmegaConf
import ray
from omegaconf import OmegaConf
from ray.rllib.agents import ppo
from ray.tune.registry import register_env
from pathlib import Path

import ig_navigation
from ig_navigation.callbacks import MetricsCallback


@@ -20,7 +22,8 @@ def igibson_env_creator(env_config):
physics_timestep=1 / 120.0,
)

@hydra.main(config_path=ssg.CONFIG_PATH, config_name="config")

@hydra.main(config_path=ig_navigation.CONFIG_PATH, config_name="config")
def main(cfg):
ray.init()
env_config = OmegaConf.to_object(cfg)
@@ -68,14 +71,11 @@ def main(cfg):
successes = 0
frames.append(env.render())

video_folder = Path('eval', cfg.experiment_name, 'videos')
video_folder.mkdir(parents = True, exist_ok = True)
video_path = f'eval_episodes.mp4'
video_folder = Path("eval", cfg.experiment_name, "videos")
video_folder.mkdir(parents=True, exist_ok=True)
video_path = f"eval_episodes.mp4"
video = cv2.VideoWriter(
video_path,
cv2.VideoWriter_fourcc(*"mp4v"),
15,
frames[0].shape[:2]
video_path, cv2.VideoWriter_fourcc(*"mp4v"), 15, frames[0].shape[:2]
)

for _ in range(100):
@@ -89,15 +89,15 @@ def main(cfg):
obs, reward, done, info = env.step(action)
episode_reward += reward
frames.append(env.render())
for reward, value in info['reward_breakdown'].items():
for reward, value in info["reward_breakdown"].items():
reward_breakdown[reward] += value
if info['success']:
if info["success"]:
assert done
success = True
successes +=1
successes += 1
trials += 1
print('Success: ', success)
print('episode reward: ', episode_reward)
print("Success: ", success)
print("episode reward: ", episode_reward)
for key, value in reward_breakdown.items():
print(f"{key}: {value}")
print()
@@ -108,5 +108,6 @@ def main(cfg):
video.write(screen)
video.release()


if __name__ == "__main__":
main()
ig_navigation/__init__.py: 8 changes (5 additions & 3 deletions)
@@ -1,8 +1,10 @@
import os
from ig_navigation.turtlebot import Turtlebot

from igibson.robots.robot_base import REGISTERED_ROBOTS

ROOT_PATH= os.path.dirname(__file__)
CONFIG_PATH=os.path.join(ROOT_PATH, '..', 'configs')
from ig_navigation.turtlebot import Turtlebot

ROOT_PATH = os.path.dirname(__file__)
CONFIG_PATH = os.path.join(ROOT_PATH, "..", "configs")

REGISTERED_ROBOTS["Turtlefast"] = Turtlebot
ig_navigation/callbacks.py: 5 changes (3 additions & 2 deletions)
@@ -1,7 +1,8 @@
from ray.rllib.agents.callbacks import DefaultCallbacks
import cv2
from pathlib import Path

import cv2
from ray.rllib.agents.callbacks import DefaultCallbacks


class MetricsCallback(DefaultCallbacks):
def on_episode_step(self, *, worker, base_env, policies, episode, **kwargs):
ig_navigation/floor_sampler.py: 1 change (0 additions & 1 deletion)
@@ -1,6 +1,5 @@
import numpy as np
import pybullet as p

from igibson.external.pybullet_tools.utils import (
get_aabb_center,
get_aabb_extent,
ig_navigation/igibson_env.py: 28 changes (17 additions & 11 deletions)
@@ -1,17 +1,16 @@
import argparse

from igibson.envs.igibson_env import iGibsonEnv
import numpy as np

import numpy as np

import gym
import numpy as np
from igibson.envs.igibson_env import iGibsonEnv
from igibson.sensors.bump_sensor import BumpSensor
from igibson.sensors.scan_sensor import ScanSensor
from igibson.sensors.vision_sensor import VisionSensor
from igibson.sensors.bump_sensor import BumpSensor
from igibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT

from ig_navigation.search_task import SearchTask


class SearchEnv(iGibsonEnv):
metadata = {"render.modes": ["rgb_array"]}

@@ -113,12 +112,16 @@ def load_observation_space(self):
vision_modalities.append("normal")
if "seg" in self.output:
observation_space["seg"] = self.build_obs_space(
shape=(self.image_height, self.image_width, 1), low=0.0, high=MAX_CLASS_COUNT
shape=(self.image_height, self.image_width, 1),
low=0.0,
high=MAX_CLASS_COUNT,
)
vision_modalities.append("seg")
if "ins_seg" in self.output:
observation_space["ins_seg"] = self.build_obs_space(
shape=(self.image_height, self.image_width, 1), low=0.0, high=MAX_INSTANCE_COUNT
shape=(self.image_height, self.image_width, 1),
low=0.0,
high=MAX_INSTANCE_COUNT,
)
vision_modalities.append("ins_seg")
if "rgb_filled" in self.output: # use filler
@@ -134,9 +137,13 @@ def load_observation_space(self):
if "scan" in self.output:
self.n_horizontal_rays = self.config.get("n_horizontal_rays", 128)
self.n_vertical_beams = self.config.get("n_vertical_beams", 1)
assert self.n_vertical_beams == 1, "scan can only handle one vertical beam for now"
assert (
self.n_vertical_beams == 1
), "scan can only handle one vertical beam for now"
observation_space["scan"] = self.build_obs_space(
shape=(self.n_horizontal_rays * self.n_vertical_beams, 1), low=0.0, high=1.0
shape=(self.n_horizontal_rays * self.n_vertical_beams, 1),
low=0.0,
high=1.0,
)
scan_modalities.append("scan")
if "occupancy_grid" in self.output:
@@ -188,7 +195,6 @@ def get_state(self):
return state



if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
ig_navigation/search_reward.py: 5 changes (2 additions & 3 deletions)
@@ -1,8 +1,7 @@
from igibson.reward_functions.reward_function_base import BaseRewardFunction
from igibson.utils.utils import l2_distance
import numpy as np

from igibson.object_states.robot_related_states import ObjectsInFOVOfRobot
from igibson.reward_functions.reward_function_base import BaseRewardFunction
from igibson.utils.utils import l2_distance


class SearchReward(BaseRewardFunction):
ig_navigation/search_task.py: 40 changes (12 additions & 28 deletions)
@@ -1,41 +1,24 @@
import numpy as np
import os

import numpy as np
from bddl.object_taxonomy import ObjectTaxonomy
from igibson.utils.assets_utils import (
get_ig_avg_category_specs,
get_ig_category_path,
get_ig_model_path,
)

from igibson.object_states.robot_related_states import ObjectsInFOVOfRobot
from igibson.objects.articulated_object import URDFObject
from igibson.tasks.task_base import BaseTask

from ig_navigation.floor_sampler import sample_on_floor
from ig_navigation.search_reward import SearchReward, PotentialReward, PotentialReward
from igibson.termination_conditions.timeout import Timeout

from igibson.termination_conditions.termination_condition_base import (
BaseTerminationCondition,
)
from igibson.utils.utils import l2_distance
import numpy as np
import os

from igibson.object_states.robot_related_states import ObjectsInFOVOfRobot

from bddl.object_taxonomy import ObjectTaxonomy
from igibson.termination_conditions.timeout import Timeout
from igibson.utils.assets_utils import (
get_ig_avg_category_specs,
get_ig_category_path,
get_ig_model_path,
)
from igibson.utils.utils import l2_distance

from igibson.objects.articulated_object import URDFObject
from igibson.tasks.task_base import BaseTask

from igibson.termination_conditions.timeout import Timeout
from ig_navigation.floor_sampler import sample_on_floor
from ig_navigation.search_reward import PotentialReward, SearchReward

import os

class SearchTermination(BaseTerminationCondition):
"""
@@ -62,7 +45,10 @@ def get_termination(self, task, env):
)
< self.dist_tol
)
in_view = (task.target_obj.main_body in env.robots[0].states[ObjectsInFOVOfRobot].get_value())
in_view = (
task.target_obj.main_body
in env.robots[0].states[ObjectsInFOVOfRobot].get_value()
)
done = done and in_view
success = done
return done, success
@@ -100,9 +86,7 @@ def get_sampleable_categories(self):

def choose_task(self):
# obj_pro = self.import_object(wordnet_category = 'microwave.n.02' , model='7128')
obj_pro = self.import_object(
igibson_category="microwave", model="7320"
)
obj_pro = self.import_object(igibson_category="microwave", model="7320")
self.target_obj = obj_pro
room = np.random.choice(np.array(list(self.scene.room_ins_name_to_ins_id)))
sample_on_floor(obj_pro, self.scene, room=room)
ig_navigation/turtlebot.py: 8 changes (3 additions & 5 deletions)
@@ -1,13 +1,11 @@
import os

import numpy as np

import igibson
from igibson.robots.two_wheel_robot import TwoWheelRobot
import gym
from scipy.spatial.transform import Rotation as R
import igibson
import numpy as np
import pybullet as p
from igibson.robots.two_wheel_robot import TwoWheelRobot
from scipy.spatial.transform import Rotation as R


class Turtlebot(TwoWheelRobot):
keyboard_demo.py: 3 changes (2 additions & 1 deletion)
@@ -2,12 +2,13 @@

import cv2
import hydra
import matplotlib.pyplot as plt
from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
from omegaconf import OmegaConf

import ig_navigation
from ig_navigation.igibson_env import SearchEnv
import matplotlib.pyplot as plt


@hydra.main(config_path=ig_navigation.CONFIG_PATH, config_name="config")
def main(cfg):
setup.py: 2 changes (1 addition & 1 deletion)
@@ -4,5 +4,5 @@
name="ig_navigation",
version="0.0.1",
author="Stanford University",
packages=['ig_navigation']
packages=["ig_navigation"],
)
train.py: 13 changes (7 additions & 6 deletions)
@@ -1,15 +1,16 @@
import re
import numpy as np
from pathlib import Path

import hydra
from omegaconf import OmegaConf
import numpy as np
import ray
from omegaconf import OmegaConf
from ray.rllib.agents import ppo
from ray.tune.registry import register_env
from pathlib import Path
from ray.tune.logger import UnifiedLogger
from ray.tune.registry import register_env

import ig_navigation
from ig_navigation.callbacks import MetricsCallback, DummyCallback
from ig_navigation.callbacks import DummyCallback, MetricsCallback


def igibson_env_creator(env_config):
@@ -78,7 +79,7 @@ def main(cfg):
Path(log_path).mkdir(parents=True, exist_ok=True)
trainer = ppo.PPOTrainer(
config,
logger_creator=lambda x: UnifiedLogger(x, log_path),
logger_creator=lambda x: UnifiedLogger(x, log_path), # type: ignore
)

if Path(checkpoint_path).exists():
