diff --git a/eval.py b/eval.py
index 3da234a..cd06711 100644
--- a/eval.py
+++ b/eval.py
@@ -1,12 +1,14 @@
 import re
+from pathlib import Path
+
 import cv2
 import hydra
-from omegaconf import OmegaConf
 import ray
+from omegaconf import OmegaConf
 from ray.rllib.agents import ppo
 from ray.tune.registry import register_env
-from pathlib import Path
 
+import ig_navigation
 from ig_navigation.callbacks import MetricsCallback
 
 
@@ -20,7 +22,8 @@ def igibson_env_creator(env_config):
         physics_timestep=1 / 120.0,
     )
 
-@hydra.main(config_path=ssg.CONFIG_PATH, config_name="config")
+
+@hydra.main(config_path=ig_navigation.CONFIG_PATH, config_name="config")
 def main(cfg):
     ray.init()
     env_config = OmegaConf.to_object(cfg)
@@ -68,14 +71,11 @@ def main(cfg):
     successes = 0
     frames.append(env.render())
 
-    video_folder = Path('eval', cfg.experiment_name, 'videos')
-    video_folder.mkdir(parents = True, exist_ok = True)
-    video_path = f'eval_episodes.mp4'
+    video_folder = Path("eval", cfg.experiment_name, "videos")
+    video_folder.mkdir(parents=True, exist_ok=True)
+    video_path = f"eval_episodes.mp4"
     video = cv2.VideoWriter(
-        video_path,
-        cv2.VideoWriter_fourcc(*"mp4v"),
-        15,
-        frames[0].shape[:2]
+        video_path, cv2.VideoWriter_fourcc(*"mp4v"), 15, frames[0].shape[:2]
     )
 
     for _ in range(100):
@@ -89,15 +89,15 @@ def main(cfg):
             obs, reward, done, info = env.step(action)
             episode_reward += reward
             frames.append(env.render())
-            for reward, value in info['reward_breakdown'].items():
+            for reward, value in info["reward_breakdown"].items():
                 reward_breakdown[reward] += value
-        if info['success']:
+        if info["success"]:
             assert done
             success = True
-            successes +=1
+            successes += 1
         trials += 1
-        print('Success: ', success)
-        print('episode reward: ', episode_reward)
+        print("Success: ", success)
+        print("episode reward: ", episode_reward)
         for key, value in reward_breakdown.items():
             print(f"{key}: {value}")
         print()
@@ -108,5 +108,6 @@ def main(cfg):
         video.write(screen)
     video.release()
 
+
 if __name__ == "__main__":
     main()
diff --git a/ig_navigation/__init__.py b/ig_navigation/__init__.py
index 10f3abb..c59aae7 100644
--- a/ig_navigation/__init__.py
+++ b/ig_navigation/__init__.py
@@ -1,8 +1,10 @@
 import os
-from ig_navigation.turtlebot import Turtlebot
+
 from igibson.robots.robot_base import REGISTERED_ROBOTS
 
-ROOT_PATH= os.path.dirname(__file__)
-CONFIG_PATH=os.path.join(ROOT_PATH, '..', 'configs')
+from ig_navigation.turtlebot import Turtlebot
+
+ROOT_PATH = os.path.dirname(__file__)
+CONFIG_PATH = os.path.join(ROOT_PATH, "..", "configs")
 
 REGISTERED_ROBOTS["Turtlefast"] = Turtlebot
diff --git a/ig_navigation/callbacks.py b/ig_navigation/callbacks.py
index 584a91a..06ab6b3 100644
--- a/ig_navigation/callbacks.py
+++ b/ig_navigation/callbacks.py
@@ -1,7 +1,8 @@
-from ray.rllib.agents.callbacks import DefaultCallbacks
-import cv2
 from pathlib import Path
 
+import cv2
+from ray.rllib.agents.callbacks import DefaultCallbacks
+
 
 class MetricsCallback(DefaultCallbacks):
     def on_episode_step(self, *, worker, base_env, policies, episode, **kwargs):
diff --git a/ig_navigation/floor_sampler.py b/ig_navigation/floor_sampler.py
index 315fa83..2d467a8 100644
--- a/ig_navigation/floor_sampler.py
+++ b/ig_navigation/floor_sampler.py
@@ -1,6 +1,5 @@
 import numpy as np
 import pybullet as p
-
 from igibson.external.pybullet_tools.utils import (
     get_aabb_center,
     get_aabb_extent,
diff --git a/ig_navigation/igibson_env.py b/ig_navigation/igibson_env.py
index 3aecb13..2e8f2d8 100644
--- a/ig_navigation/igibson_env.py
+++ b/ig_navigation/igibson_env.py
@@ -1,17 +1,16 @@
 import argparse
-from igibson.envs.igibson_env import iGibsonEnv
-import numpy as np
-
-import numpy as np
-
 import gym
+import numpy as np
+from igibson.envs.igibson_env import iGibsonEnv
+from igibson.sensors.bump_sensor import BumpSensor
 from igibson.sensors.scan_sensor import ScanSensor
 from igibson.sensors.vision_sensor import VisionSensor
-from igibson.sensors.bump_sensor import BumpSensor
 from igibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT
+
 from ig_navigation.search_task import SearchTask
+
 
 class SearchEnv(iGibsonEnv):
     metadata = {"render.modes": ["rgb_array"]}
@@ -113,12 +112,16 @@ def load_observation_space(self):
             vision_modalities.append("normal")
         if "seg" in self.output:
             observation_space["seg"] = self.build_obs_space(
-                shape=(self.image_height, self.image_width, 1), low=0.0, high=MAX_CLASS_COUNT
+                shape=(self.image_height, self.image_width, 1),
+                low=0.0,
+                high=MAX_CLASS_COUNT,
             )
             vision_modalities.append("seg")
         if "ins_seg" in self.output:
             observation_space["ins_seg"] = self.build_obs_space(
-                shape=(self.image_height, self.image_width, 1), low=0.0, high=MAX_INSTANCE_COUNT
+                shape=(self.image_height, self.image_width, 1),
+                low=0.0,
+                high=MAX_INSTANCE_COUNT,
             )
             vision_modalities.append("ins_seg")
         if "rgb_filled" in self.output:  # use filler
@@ -134,9 +137,13 @@ def load_observation_space(self):
         if "scan" in self.output:
            self.n_horizontal_rays = self.config.get("n_horizontal_rays", 128)
            self.n_vertical_beams = self.config.get("n_vertical_beams", 1)
-            assert self.n_vertical_beams == 1, "scan can only handle one vertical beam for now"
+            assert (
+                self.n_vertical_beams == 1
+            ), "scan can only handle one vertical beam for now"
             observation_space["scan"] = self.build_obs_space(
-                shape=(self.n_horizontal_rays * self.n_vertical_beams, 1), low=0.0, high=1.0
+                shape=(self.n_horizontal_rays * self.n_vertical_beams, 1),
+                low=0.0,
+                high=1.0,
             )
             scan_modalities.append("scan")
         if "occupancy_grid" in self.output:
@@ -188,7 +195,6 @@ def get_state(self):
 
         return state
 
-
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument(
diff --git a/ig_navigation/search_reward.py b/ig_navigation/search_reward.py
index 0ca35c2..626c70f 100644
--- a/ig_navigation/search_reward.py
+++ b/ig_navigation/search_reward.py
@@ -1,8 +1,7 @@
-from igibson.reward_functions.reward_function_base import BaseRewardFunction
-from igibson.utils.utils import l2_distance
 import numpy as np
-
 from igibson.object_states.robot_related_states import ObjectsInFOVOfRobot
+from igibson.reward_functions.reward_function_base import BaseRewardFunction
+from igibson.utils.utils import l2_distance
 
 
 class SearchReward(BaseRewardFunction):
diff --git a/ig_navigation/search_task.py b/ig_navigation/search_task.py
index 2aca1d1..87b62d1 100644
--- a/ig_navigation/search_task.py
+++ b/ig_navigation/search_task.py
@@ -1,41 +1,24 @@
-import numpy as np
+import os
 
+import numpy as np
 from bddl.object_taxonomy import ObjectTaxonomy
-from igibson.utils.assets_utils import (
-    get_ig_avg_category_specs,
-    get_ig_category_path,
-    get_ig_model_path,
-)
-
+from igibson.object_states.robot_related_states import ObjectsInFOVOfRobot
 from igibson.objects.articulated_object import URDFObject
 from igibson.tasks.task_base import BaseTask
-
-from ig_navigation.floor_sampler import sample_on_floor
-from ig_navigation.search_reward import SearchReward, PotentialReward, PotentialReward
-from igibson.termination_conditions.timeout import Timeout
-
 from igibson.termination_conditions.termination_condition_base import (
     BaseTerminationCondition,
 )
-from igibson.utils.utils import l2_distance
-import numpy as np
-import os
-
-from igibson.object_states.robot_related_states import ObjectsInFOVOfRobot
-
-from bddl.object_taxonomy import ObjectTaxonomy
+from igibson.termination_conditions.timeout import Timeout
 from igibson.utils.assets_utils import (
     get_ig_avg_category_specs,
     get_ig_category_path,
     get_ig_model_path,
 )
+from igibson.utils.utils import l2_distance
 
-from igibson.objects.articulated_object import URDFObject
-from igibson.tasks.task_base import BaseTask
-
-from igibson.termination_conditions.timeout import Timeout
+from ig_navigation.floor_sampler import sample_on_floor
+from ig_navigation.search_reward import PotentialReward, SearchReward
 
-import os
 
 class SearchTermination(BaseTerminationCondition):
     """
@@ -62,7 +45,10 @@ def get_termination(self, task, env):
             )
             < self.dist_tol
         )
-        in_view = (task.target_obj.main_body in env.robots[0].states[ObjectsInFOVOfRobot].get_value())
+        in_view = (
+            task.target_obj.main_body
+            in env.robots[0].states[ObjectsInFOVOfRobot].get_value()
+        )
         done = done and in_view
         success = done
         return done, success
@@ -100,9 +86,7 @@ def get_sampleable_categories(self):
 
     def choose_task(self):
         # obj_pro = self.import_object(wordnet_category = 'microwave.n.02' , model='7128')
-        obj_pro = self.import_object(
-            igibson_category="microwave", model="7320"
-        )
+        obj_pro = self.import_object(igibson_category="microwave", model="7320")
         self.target_obj = obj_pro
         room = np.random.choice(np.array(list(self.scene.room_ins_name_to_ins_id)))
         sample_on_floor(obj_pro, self.scene, room=room)
diff --git a/ig_navigation/turtlebot.py b/ig_navigation/turtlebot.py
index 7b0d2df..f8afb30 100644
--- a/ig_navigation/turtlebot.py
+++ b/ig_navigation/turtlebot.py
@@ -1,13 +1,11 @@
 import os
-import numpy as np
-
-import igibson
-from igibson.robots.two_wheel_robot import TwoWheelRobot
 import gym
-from scipy.spatial.transform import Rotation as R
 
 import igibson
+import numpy as np
 import pybullet as p
+from igibson.robots.two_wheel_robot import TwoWheelRobot
+from scipy.spatial.transform import Rotation as R
 
 
 class Turtlebot(TwoWheelRobot):
diff --git a/keyboard_demo.py b/keyboard_demo.py
index 6d71877..ab10cb7 100644
--- a/keyboard_demo.py
+++ b/keyboard_demo.py
@@ -2,12 +2,13 @@
 
 import cv2
 import hydra
+import matplotlib.pyplot as plt
 from igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings
 from omegaconf import OmegaConf
 
 import ig_navigation
 from ig_navigation.igibson_env import SearchEnv
-import matplotlib.pyplot as plt
+
 
 @hydra.main(config_path=ig_navigation.CONFIG_PATH, config_name="config")
 def main(cfg):
diff --git a/setup.py b/setup.py
index bd64a50..a004df4 100644
--- a/setup.py
+++ b/setup.py
@@ -4,5 +4,5 @@
     name="ig_navigation",
     version="0.0.1",
     author="Stanford University",
-    packages=['ig_navigation']
+    packages=["ig_navigation"],
 )
diff --git a/train.py b/train.py
index 7b0ab07..2ebdf0e 100644
--- a/train.py
+++ b/train.py
@@ -1,15 +1,16 @@
 import re
-import numpy as np
+from pathlib import Path
+
 import hydra
-from omegaconf import OmegaConf
+import numpy as np
 import ray
+from omegaconf import OmegaConf
 from ray.rllib.agents import ppo
-from ray.tune.registry import register_env
-from pathlib import Path
 from ray.tune.logger import UnifiedLogger
+from ray.tune.registry import register_env
 
 import ig_navigation
-from ig_navigation.callbacks import MetricsCallback, DummyCallback
+from ig_navigation.callbacks import DummyCallback, MetricsCallback
 
 
 def igibson_env_creator(env_config):
@@ -78,7 +79,7 @@ def main(cfg):
     Path(log_path).mkdir(parents=True, exist_ok=True)
     trainer = ppo.PPOTrainer(
         config,
-        logger_creator=lambda x: UnifiedLogger(x, log_path),
+        logger_creator=lambda x: UnifiedLogger(x, log_path),  # type: ignore
     )
 
     if Path(checkpoint_path).exists():