diff --git a/back_fisheye_image.png b/back_fisheye_image.png
new file mode 100644
index 0000000000..918e12070c
Binary files /dev/null and b/back_fisheye_image.png differ
diff --git a/domain.pddl b/domain.pddl
new file mode 100644
index 0000000000..b3c614b8e5
--- /dev/null
+++ b/domain.pddl
@@ -0,0 +1,55 @@
+(define (domain mydomain)
+    (:requirements :typing :disjunctive-preconditions)
+    (:types block robot)
+
+    (:predicates
+        (Clear ?x0 - block)
+        (GripperOpen ?x0 - robot)
+        (Holding ?x0 - block)
+        (On ?x0 - block ?x1 - block)
+        (OnTable ?x0 - block)
+    )
+
+    (:action PickFromTable
+        :parameters (?block - block ?robot - robot)
+        :precondition (and (Clear ?block)
+            (GripperOpen ?robot)
+            (OnTable ?block))
+        :effect (and (Holding ?block)
+            (not (Clear ?block))
+            (not (GripperOpen ?robot))
+            (not (OnTable ?block)))
+    )
+
+    (:action PutOnTable
+        :parameters (?block - block ?robot - robot)
+        :precondition (and (Holding ?block))
+        :effect (and (Clear ?block)
+            (GripperOpen ?robot)
+            (OnTable ?block)
+            (not (Holding ?block)))
+    )
+
+    (:action Stack
+        :parameters (?block - block ?otherblock - block ?robot - robot)
+        :precondition (and (Clear ?otherblock)
+            (Holding ?block))
+        :effect (and (Clear ?block)
+            (GripperOpen ?robot)
+            (On ?block ?otherblock)
+            (not (Clear ?otherblock))
+            (not (Holding ?block)))
+    )
+
+    (:action Unstack
+        :parameters (?block - block ?otherblock - block ?robot - robot)
+        :precondition (and (Clear ?block)
+            (GripperOpen ?robot)
+            (On ?block ?otherblock))
+        :effect (and (Clear ?otherblock)
+            (Holding ?block)
+            (not (Clear ?block))
+            (not (GripperOpen ?robot))
+            (not (On ?block ?otherblock)))
+    )
+)
\ No newline at end of file
diff --git a/frontleft_fisheye_image.png b/frontleft_fisheye_image.png
new file mode 100644
index 0000000000..adbb173b48
Binary files /dev/null and b/frontleft_fisheye_image.png differ
diff --git a/frontright_fisheye_image.png b/frontright_fisheye_image.png
new file mode 100644
index 0000000000..f38dbc2ba4
Binary files /dev/null and b/frontright_fisheye_image.png differ
diff --git a/hand_color_image.png b/hand_color_image.png
new file mode 100644
index 0000000000..ef7a53f54d
Binary files /dev/null and b/hand_color_image.png differ
diff --git a/hand_image_0.png b/hand_image_0.png
new file mode 100644
index 0000000000..7557ab8050
Binary files /dev/null and b/hand_image_0.png differ
diff --git a/image_0.png b/image_0.png
new file mode 100644
index 0000000000..b479d56835
Binary files /dev/null and b/image_0.png differ
diff --git a/image_1.png b/image_1.png
new file mode 100644
index 0000000000..16cddeff84
Binary files /dev/null and b/image_1.png differ
diff --git a/image_2.png b/image_2.png
new file mode 100644
index 0000000000..78b1bf1fec
Binary files /dev/null and b/image_2.png differ
diff --git a/image_3.png b/image_3.png
new file mode 100644
index 0000000000..b479d56835
Binary files /dev/null and b/image_3.png differ
diff --git a/image_4.png b/image_4.png
new file mode 100644
index 0000000000..94fc14dd60
Binary files /dev/null and b/image_4.png differ
diff --git a/image_5.png b/image_5.png
new file mode 100644
index 0000000000..f6133f3698
Binary files /dev/null and b/image_5.png differ
diff --git a/image_i.png b/image_i.png
new file mode 100644
index 0000000000..9b227df31d
Binary files /dev/null and b/image_i.png differ
diff --git a/left_fisheye_image.png b/left_fisheye_image.png
new file mode 100644
index 0000000000..8ca453628e
Binary files /dev/null and b/left_fisheye_image.png differ
diff --git a/my_image.png b/my_image.png
new file mode 100644
index 0000000000..14f3e7b4aa
Binary files /dev/null and b/my_image.png differ
diff --git a/my_image2.png b/my_image2.png
new file mode 100644
index 0000000000..6417b933cc
Binary files /dev/null and b/my_image2.png differ
diff --git a/predicators/cogman.py b/predicators/cogman.py
index 4401f60c98..927dbc6f30 100644
--- a/predicators/cogman.py
+++ b/predicators/cogman.py
@@ -18,7 +18,7 @@
 from predicators.settings import CFG
 from predicators.structs import Action, Dataset, EnvironmentTask, GroundAtom, \
     InteractionRequest, InteractionResult, LowLevelTrajectory, Metrics, \
-    Observation, State, Task, Video
+    Object, Observation, State, Task, Video
 
 
 class CogMan:
@@ -38,6 +38,8 @@ def __init__(self, approach: BaseApproach, perceiver: BasePerceiver,
         self._episode_action_history: List[Action] = []
         self._episode_images: Video = []
         self._episode_num = -1
+        self._last_goal: Optional[Set[GroundAtom]] = None
+        self._last_act: Optional[Action] = None
 
     def reset(self, env_task: EnvironmentTask) -> None:
         """Start a new episode of environment interaction."""
@@ -60,6 +62,9 @@ def reset(self, env_task: EnvironmentTask) -> None:
     def step(self, observation: Observation) -> Optional[Action]:
         """Receive an observation and produce an action, or None for done."""
         state = self._perceiver.step(observation)
+        logging.debug("[CogMan] State: %s", state)
+        logging.debug("[CogMan] Abstract state: %s",
+                      utils.abstract(state, self._approach._initial_predicates))
         if CFG.make_cogman_videos:
             assert self._current_env_task is not None
             imgs = self._perceiver.render_mental_images(
@@ -80,9 +85,18 @@ def step(self, observation: Observation) -> Optional[Action]:
             logging.info("[CogMan] Termination triggered.")
             return None
         # Check if we should replan.
+        self._exec_monitor.perceiver = self._perceiver
+        self._exec_monitor.env_task = self._current_env_task
         if self._exec_monitor.step(state):
             logging.info("[CogMan] Replanning triggered.")
+            self._last_goal = self._current_goal
+            try:
+                self._last_act = self._current_policy(state)
+            except Exception:
+                self._last_act = None  # old policy may be inapplicable now
+            self._current_goal = self._exec_monitor._curr_goal
             assert self._current_goal is not None
+            state = self._perceiver.step(observation)
             task = Task(state, self._current_goal)
             self._reset_policy(task)
             self._exec_monitor.reset(task)
@@ -94,7 +108,11 @@ def step(self, observation: Observation) -> Optional[Action]:
         if self._override_policy is None:
             assert not self._exec_monitor.step(state)
         assert self._current_policy is not None
-        act = self._current_policy(state)
+        try:
+            act = self._current_policy(state)
+        except Exception:
+            assert self._last_act is not None
+            act = self._last_act
         self._perceiver.update_perceiver_with_action(act)
         self._exec_monitor.update_approach_info(
             self._approach.get_execution_monitoring_info())
diff --git a/predicators/envs/spot_env.py b/predicators/envs/spot_env.py
index 5c3f35309d..d24876d92c 100644
--- a/predicators/envs/spot_env.py
+++ b/predicators/envs/spot_env.py
@@ -57,6 +57,8 @@ SpotActionExtraInfo, State, STRIPSOperator, Type, Variable, \
     VLMGroundAtom, VLMPredicate
 from predicators.utils import log_rich_table
+from predicators.spot_utils.perception.object_perception import vlm
+import PIL.Image
 
 ###############################################################################
 #                                Base Class                                   #
 ###############################################################################
@@ -269,6 +271,9 @@ def __init__(self, use_gui: bool = True) -> None:
         # Used for the move-related hacks in step().
         self._last_known_object_poses: Dict[Object, math_helpers.SE3Pose] = {}
+        # Used for novel objects
+        self._novel_objects_with_query: Set[Tuple[Object, str]] = set()
+
     def _initialize_pybullet(self) -> None:
         # First, check if we have any connections to pybullet already,
         # and reset them.
@@ -628,6 +633,63 @@ def get_observation(self) -> Observation:
     def goal_reached(self) -> bool:
         return self._current_task_goal_reached
 
+    def _get_vlm_goal_prompt_prefix(self,
+                                    object_names: Collection[str]) -> str:
+        # pylint:disable=line-too-long
+        available_predicates = ", ".join(sorted(pred.pretty_str()[1] for pred in self.goal_predicates if 'Inside(' in pred.pretty_str()[1]))
+        available_object_types = ", ".join(sorted(t.name for t in _ALL_TYPES))
+        # We could extract the object names, but this is simpler.
+        prompt = f"""# The available predicates are: {available_predicates}
+# The available object types are: {available_object_types}
+# Use the available predicates and object types to convert natural language goals into JSON goals.
+
+# I want a sandwich with a patty, cheese, and lettuce, and get ready to give me a glass of milk.
+{{"Holding": [["robot", "milk"]], "On": [["bread0", "board"], ["bread1", "lettuce0"], ["lettuce0", "cheese0"], ["cheese0", "patty0"], ["patty0", "bread0"]]}}
+"""
+        return prompt
+
+    def _parse_vlm_objects_from_rgbds(
+            self, rgbds: Dict[str, RGBDImageWithContext], language_goal: str,
+            id_to_obj: Dict[str, Object]) -> Set[GroundAtom]:
+        """Helper for parsing language-based goals from JSON task specs."""
+        ### TODO: temporary hardcoded goal override for testing; remove.
+        language_goal = 'place empty cups into the cardboard_box'
+        ###
+        object_names = set(id_to_obj)
+        prompt_prefix = self._get_vlm_goal_prompt_prefix(object_names)
+        prompt = prompt_prefix + f"\n# {language_goal}"
+        image_list = [
+            PIL.Image.fromarray(v.rotated_rgb) for k, v in rgbds.items() if 'hand' in k
+        ]
+        responses = vlm.sample_completions(
+            prompt=prompt,
+            imgs=image_list,
+            temperature=0.1,
+            seed=int(time.time()),
+            num_completions=1,
+        )
+        response = responses[0]
+        # Currently assumes that the VLM is perfect. In the future, will need
+        # to handle various errors and perhaps query the VLM for multiple
+        # responses until we find one that can be parsed.
+        try:
+            goal_spec = json.loads(response)
+        except json.JSONDecodeError:
+            goal_spec = json.loads(response.replace('`', '').replace('json', ''))  # strip any ```json fence
+
+        for pred, args in goal_spec.items():
+            for arg in args:
+                for obj_name in arg:
+                    if 'robot' in obj_name:
+                        id_to_obj[obj_name] = Object(obj_name, _robot_type)
+                    elif any(name in obj_name for name in ('cup', 'box', 'bin')):
+                        id_to_obj[obj_name] = Object(obj_name, _container_type)
+                    else:
+                        id_to_obj[obj_name] = Object(obj_name, _movable_object_type)
+
+        new_goal = self._parse_goal_from_json(goal_spec, id_to_obj)
+        return new_goal
+
     def _build_realworld_observation(
             self, nonpercept_atoms: Set[GroundAtom],
             curr_obs: Optional[_SpotObservation]) -> _SpotObservation:
@@ -640,14 +702,21 @@ def _build_realworld_observation(
         assert self._robot is not None
         assert self._localizer is not None
         self._localizer.localize()
-        # Get the universe of all object detections.
-        all_object_detection_ids = set(self._detection_id_to_obj)
         # Get the camera images.
time.sleep(0.5) rgbds = capture_images(self._robot, self._localizer) + + ## TODO: get novel objects here + new_goal_from_vlm = self._parse_vlm_objects_from_rgbds(rgbds, self._current_task.goal_description, {}) + # ADD objects to _novel_objects_with_query + self._novel_objects_with_query.add((Object("cup1", _container_type), "red toy cup/red cup/small red cylinder/red plastic circle")) + #self._novel_objects_with_query.add((Object("cup2", _container_type), "blue cup/blue cylinder/blue plastic circle/blue toy cup")) + # self._novel_objects_with_query.add((Object("cup3", _container_type), "green cup/green mug/uncovered green cup")) + ## + # Get the universe of all object detections. + all_object_detection_ids = set(self._detection_id_to_obj) all_detections, all_artifacts = detect_objects( all_object_detection_ids, rgbds, self._allowed_regions) - # Separately, get detections for the hand in particular. hand_rgbd = { k: v @@ -1233,10 +1302,15 @@ def _top_above_classifier(state: State, objects: Sequence[Object]) -> bool: def _inside_classifier(state: State, objects: Sequence[Object]) -> bool: obj_in, obj_container = objects - # NOTE: Legacy version evaluate predicates individually - if CFG.spot_vlm_eval_predicate: - raise RuntimeError( - "VLM predicate classifier should be evaluated in batch!") + # # NOTE: Legacy version evaluate predicates individually + # if CFG.spot_vlm_eval_predicate: + # raise RuntimeError( + # "VLM predicate classifier should be evaluated in batch!") + if "cup" in obj_in.name and "cup" in obj_container.name: + return False + + # if "cup1" in obj_in.name and "cardboard" in obj_container.name: + # return False if not _object_in_xy_classifier( state, obj_in, obj_container, buffer=_INSIDE_SURFACE_BUFFER): @@ -1530,6 +1604,8 @@ def _get_sweeping_surface_for_container(container: Object, _not_holding_classifier) _InHandView = Predicate("InHandView", [_robot_type, _movable_object_type], in_hand_view_classifier) +_InHandViewFromTop = Predicate("InHandViewFromTop", [_robot_type, _movable_object_type], + in_hand_view_classifier) _InView = Predicate("InView", [_robot_type, _movable_object_type], in_general_view_classifier) _Reachable = Predicate("Reachable", [_robot_type, _base_object_type], @@ -1558,7 +1634,7 @@ def _get_sweeping_surface_for_container(container: Object, # DEBUG hardcode now; CFG not updated here?! # if CFG.spot_vlm_eval_predicate: -tmp_vlm_flag = True +tmp_vlm_flag = False if tmp_vlm_flag: # _On = Predicate("On", [_movable_object_type, _base_object_type], @@ -1571,7 +1647,7 @@ def _get_sweeping_surface_for_container(container: Object, _Inside = VLMPredicate( "Inside", [_movable_object_type, _container_type], prompt= - "This typically describes an object (obj1, first arg) inside a container (obj2, second arg) (so it's overlapping), and it's in conflict with the object being on a surface. This is obj1 inside obj2, so obj1 should be smaller than obj2." + "This describes when an object (obj1:type, first arg) is inside another object/container (obj2:type, second arg) (so it's overlapping), and it's in conflict with the object being on a surface. Inside(obj1:type, obj2:type) means obj1 is inside obj2, so obj1 should be smaller than obj2." ) _FakeInside = VLMPredicate( _Inside.name, @@ -1641,22 +1717,22 @@ def _get_sweeping_surface_for_container(container: Object, # NOTE: How to express the belief-space 3-value predicate using state-space binary predicate is tricky, because the system doesn't support NOT (negation) or OR (disjunction). 
Thus, it couldn't do "NotKnownAsTrue OR NotKnownAsFalse".
-    # E.g,. _ContainingWaterKnownAsTrue won't work.
-    _ContainingWaterKnown = VLMPredicate(
-        "ContainingWaterKnown", [_container_type],
-        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if you know whether the container contains water or not. If you don't know, answer [no]."
+    # E.g., _ContainingFoodKnownAsTrue won't work.
+    _ContainingFoodKnown = VLMPredicate(
+        "ContainingFoodKnown", [_container_type],
+        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if you know whether the steel bowl has anything in it or not. If you don't know, answer [no]."
     )
-    _ContainingWaterUnknown = VLMPredicate(
-        "ContainingWaterUnknown", [_container_type],
-        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if you do not know whether the container contains water or not. If you know, answer [no]."
+    _ContainingFoodUnknown = VLMPredicate(
+        "ContainingFoodUnknown", [_container_type],
+        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if you do not know whether the steel bowl has anything in it or not. If you know, answer [no]."
     )
-    _ContainingWater = VLMPredicate(
-        "ContainingWater", [_container_type],
-        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if the container has water in it. If you know it doesn't have water, answer [no]."
+    _ContainingFood = VLMPredicate(
+        "ContainingFood", [_container_type],
+        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if the steel bowl has anything in it. If you know it doesn't have anything, answer [no]."
     )
-    _NotContainingWater = VLMPredicate(
-        "NotContainingWater", [_container_type],
-        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if the container does not have water in it. If it has water, answer [no]."
+    _NotContainingFood = VLMPredicate(
+        "NotContainingFood", [_container_type],
+        prompt="[Answer: yes/no only] This predicate is true (answer [yes]) if the steel bowl does not have anything in it. If it has anything, answer [no]."
) _InHandViewFromTop = VLMPredicate( @@ -1667,13 +1743,36 @@ def _get_sweeping_surface_for_container(container: Object, _ALL_PREDICATES.update({ _DoorOpenKnownTrue, _DoorOpenKnownFalse, - _ContainingWaterKnown, - _ContainingWaterUnknown, - _ContainingWater, - _NotContainingWater, + _ContainingFoodKnown, + _ContainingFoodUnknown, + _ContainingFood, + _NotContainingFood, + _InHandViewFromTop # TODO check why missing + }) +else: + _door_type = Type( + "door", + list(_base_object_type.feature_names) + + ["is_open", "in_hand_view"], + parent=_immovable_object_type + ) + _DoorOpenKnownTrue = Predicate("DoorOpenKnownTrue", [_door_type], lambda s, o: True) + _DoorOpenKnownFalse = Predicate("DoorOpenKnownFalse", [_door_type], lambda s, o: False) + _ContainingFoodKnown = Predicate("ContainingFoodKnown", [_container_type], lambda s, o: True) + _ContainingFoodUnknown = Predicate("ContainingFoodUnknown", [_container_type], lambda s, o: False) + _ContainingFood = Predicate("ContainingFood", [_container_type], lambda s, o: False) + _NotContainingFood = Predicate("NotContainingFood", [_container_type], lambda s, o: True) + _ALL_PREDICATES.update({ + _DoorOpenKnownTrue, + _DoorOpenKnownFalse, + _ContainingFoodKnown, + _ContainingFoodUnknown, + _ContainingFood, + _NotContainingFood, _InHandViewFromTop # TODO check why missing }) + _VLM_CLASSIFIER_PREDICATES: Set[VLMPredicate] = { p for p in _ALL_PREDICATES if isinstance(p, VLMPredicate) @@ -1745,18 +1844,18 @@ def _create_operators() -> Iterator[STRIPSOperator]: LiftedAtom(_InHandViewFromTop, [robot, cup]), # TODO comment LiftedAtom(_HandEmpty, [robot]), LiftedAtom(_NotHolding, [robot, cup]), - LiftedAtom(_ContainingWaterUnknown, [cup]), + LiftedAtom(_ContainingFoodUnknown, [cup]), } # NOTE: Determinized effect: both Containing and NotContaining # The belief state will be updated after execution add_effs = { - LiftedAtom(_ContainingWaterKnown, [cup]), - LiftedAtom(_ContainingWater, [cup]), + LiftedAtom(_ContainingFoodKnown, [cup]), + LiftedAtom(_ContainingFood, [cup]), # TODO add not containing water - LiftedAtom(_NotContainingWater, [cup]) + LiftedAtom(_NotContainingFood, [cup]) } del_effs = { - LiftedAtom(_ContainingWaterUnknown, [cup]) + LiftedAtom(_ContainingFoodUnknown, [cup]) } # TODO check ignore effs ignore_effs = {_Reachable, _InHandViewFromTop, _InView, _RobotReadyForSweeping} @@ -3399,7 +3498,7 @@ def _detection_id_to_obj(self) -> Dict[ObjectDetectionID, Object]: # "green bowl/greenish bowl") # TODO test green_bowl_detection = LanguageObjectDetectionID( - "cardboard box/brown box") + "cardboard box/brown box") detection_id_to_obj[green_bowl_detection] = green_bowl # # TODO temp test new object sets @@ -3472,7 +3571,8 @@ def _detection_id_to_obj(self) -> Dict[ObjectDetectionID, Object]: # NOTE: we view cup as container; cup = Object("cup", _container_type) # cup_detection = LanguageObjectDetectionID("green bowl/greenish bowl") - cup_detection = LanguageObjectDetectionID("orange cup/orange cylinder/orange-ish mug") + # cup_detection = LanguageObjectDetectionID("steel bowl/metal bowl/shiny-metallic bowl") + cup_detection = LanguageObjectDetectionID("green toy cup/green cup/small green cylinder/green plastic circle") # TODO test # cup_detection = LanguageObjectDetectionID("spam box/spam container/spam-ish box") # cup_detection = LanguageObjectDetectionID("yellow apple/yellowish apple") @@ -3481,13 +3581,20 @@ def _detection_id_to_obj(self) -> Dict[ObjectDetectionID, Object]: detection_id_to_obj[cup_detection] = cup cardboard_box = 
Object("cardboard_box", _container_type) - cardboard_box_detection = LanguageObjectDetectionID("cardboard box/brown box") + cardboard_box_detection = LanguageObjectDetectionID("cardboard box/brown box") detection_id_to_obj[cardboard_box_detection] = cardboard_box for obj, pose in get_known_immovable_objects().items(): detection_id = KnownStaticObjectDetectionID(obj.name, pose) detection_id_to_obj[detection_id] = obj + ### TODO: add novel objects to state + if len(self._novel_objects_with_query) > 0: + #logging.info(f"Env: Novel objects with query: {self._novel_objects_with_query}") + for obj, query in self._novel_objects_with_query: + detection_id_to_obj[LanguageObjectDetectionID(query)] = obj + ### + return detection_id_to_obj def _generate_goal_description(self) -> GoalDescription: @@ -3541,7 +3648,7 @@ def _detection_id_to_obj(self) -> Dict[ObjectDetectionID, Object]: detection_id_to_obj[cup_detection] = cup cardboard_box = Object("cardboard_box", _container_type) - cardboard_box_detection = LanguageObjectDetectionID("cardboard box/brown box") + cardboard_box_detection = LanguageObjectDetectionID("cardboard box/brown box") detection_id_to_obj[cardboard_box_detection] = cardboard_box for obj, pose in get_known_immovable_objects().items(): diff --git a/predicators/execution_monitoring/base_execution_monitor.py b/predicators/execution_monitoring/base_execution_monitor.py index 631b979158..d65ad7ad2f 100644 --- a/predicators/execution_monitoring/base_execution_monitor.py +++ b/predicators/execution_monitoring/base_execution_monitor.py @@ -12,6 +12,7 @@ class BaseExecutionMonitor(abc.ABC): def __init__(self) -> None: self._approach_info: List[Any] = [] self._curr_plan_timestep = 0 + self._curr_goal = None @classmethod @abc.abstractmethod diff --git a/predicators/execution_monitoring/expected_atoms_monitor.py b/predicators/execution_monitoring/expected_atoms_monitor.py index 884483b15e..deb5eb4032 100644 --- a/predicators/execution_monitoring/expected_atoms_monitor.py +++ b/predicators/execution_monitoring/expected_atoms_monitor.py @@ -7,6 +7,8 @@ BaseExecutionMonitor from predicators.settings import CFG from predicators.structs import State +from predicators.structs import GroundAtom, Object +from predicators.spot_utils.utils import _container_type class ExpectedAtomsExecutionMonitor(BaseExecutionMonitor): @@ -31,6 +33,53 @@ def step(self, state: State) -> bool: # If the expected atoms are a subset of the current atoms, then # we don't have to replan. 
         unsat_atoms = {a for a in next_expected_atoms if not a.holds(state)}
+        # Check whether the language goal has changed.
+        assert self.perceiver is not None and self.env_task is not None
+        new_goal = self.perceiver._create_goal(state, self.env_task.goal_description)
+        if new_goal != self._curr_goal:
+            logging.info(
+                "Expected atoms execution monitor triggered replanning "
+                "because the goal has changed.")
+            logging.info(f"Old goal: {self._curr_goal}")
+            logging.info(f"New goal: {new_goal}")
+            self._curr_goal = new_goal
+            # Map the objects in the new goal to the objects in the state.
+            import jellyfish  # local import: string-distance matching
+            def map_goal_to_state(goal_predicates, state_data):
+                goal_to_state_mapping = {}
+                state_usage_count = {state_obj: 0 for state_obj in state_data}
+                mapped_objects = set()
+                for pred in goal_predicates:
+                    for goal_obj in pred.objects:
+                        if goal_obj in mapped_objects:
+                            continue
+                        goal_obj_name = str(goal_obj)
+                        closest_state_obj = None
+                        min_distance = float('inf')
+                        for state_obj in state_data:
+                            state_obj_name = str(state_obj)
+                            distance = jellyfish.levenshtein_distance(goal_obj_name, state_obj_name)
+                            if distance < min_distance:
+                                min_distance = distance
+                                closest_state_obj = state_obj
+                        if state_usage_count[closest_state_obj] > 0:
+                            virtual_obj = Object(f"{closest_state_obj.name}{state_usage_count[closest_state_obj]}", closest_state_obj.type)
+                            goal_to_state_mapping[goal_obj] = virtual_obj
+                        else:
+                            goal_to_state_mapping[goal_obj] = closest_state_obj
+                        state_usage_count[closest_state_obj] += 1
+                        mapped_objects.add(goal_obj)
+                return goal_to_state_mapping
+            mapping = map_goal_to_state(self._curr_goal, state.data)
+            new_goal = set()
+            for pred in self._curr_goal:
+                new_goal.add(GroundAtom(pred.predicate, [mapping[obj] for obj in pred.objects]))
+            self._curr_goal = new_goal
+            self.perceiver._novel_objects_with_query.add((Object("cup1", _container_type), "red toy cup/red cup/small red cylinder/red plastic circle"))
+            # self.perceiver._novel_objects_with_query.add((Object("cup2", _container_type), "blue cup/blue cylinder/blue plastic cup/blue toy cup"))
+            # self.perceiver._novel_objects_with_query.add((Object("cup3", _container_type), "green cup/green mug/uncovered green cup"))
+            if "cup1" in str(self._curr_goal):
+                return True
         if not unsat_atoms:
             return False
         logging.info(
diff --git a/predicators/ground_truth_models/spot_env/nsrts.py b/predicators/ground_truth_models/spot_env/nsrts.py
index 982e639be5..25cb6a02b9 100644
--- a/predicators/ground_truth_models/spot_env/nsrts.py
+++ b/predicators/ground_truth_models/spot_env/nsrts.py
@@ -98,8 +98,8 @@ def _move_to_hand_view_object_from_top_sampler(state: State, goal: Set[GroundAto
     # Parameters are relative distance, dyaw (to the object you're moving to).
     del goal
-    min_dist = 1.2
-    max_dist = 1.8
+    min_dist = 0.9
+    max_dist = 1.2
 
     robot_obj = objs[0]
     obj_to_nav_to = objs[1]
diff --git a/predicators/ground_truth_models/spot_env/options.py b/predicators/ground_truth_models/spot_env/options.py
index 4aab7e5f3d..a689133f1f 100644
--- a/predicators/ground_truth_models/spot_env/options.py
+++ b/predicators/ground_truth_models/spot_env/options.py
@@ -38,7 +38,8 @@
 from predicators.spot_utils.utils import DEFAULT_HAND_DROP_OBJECT_POSE, \
     DEFAULT_HAND_LOOK_STRAIGHT_DOWN_POSE, DEFAULT_HAND_POST_DUMP_POSE, \
     DEFAULT_HAND_PRE_DUMP_LIFT_POSE, DEFAULT_HAND_PRE_DUMP_POSE, \
-    get_relative_se2_from_se3, load_spot_metadata, object_to_top_down_geom
+    get_relative_se2_from_se3, load_spot_metadata, object_to_top_down_geom, \
+    DEFAULT_HAND_LOOK_FROM_TOP
 from predicators.structs import Action, Array, Object, ParameterizedOption, \
     Predicate, SpotActionExtraInfo, State, Type
@@ -531,7 +532,10 @@ def _move_to_hand_view_object_from_above_policy(state: State, memory: Dict,
    yaw_param_idx = 1
    robot_obj_idx = 0
    target_obj_idx = 1
-    do_gaze = True
+    do_gaze = False
+    robot, _, _ = get_robot()
+    move_hand_to_relative_pose(robot, DEFAULT_HAND_LOOK_FROM_TOP)
+    # open_gripper(robot)
    return _move_to_target_policy(name, distance_param_idx, yaw_param_idx,
                                  robot_obj_idx, target_obj_idx, do_gaze,
                                  state, memory, objects, params)
diff --git a/predicators/perception/spot_perceiver.py b/predicators/perception/spot_perceiver.py
index 4bd8280b67..122e2ce6cf 100644
--- a/predicators/perception/spot_perceiver.py
+++ b/predicators/perception/spot_perceiver.py
@@ -3,18 +3,21 @@
 import logging
 import time
 from pathlib import Path
-from typing import Dict, List, Optional, Set
+from typing import Collection, Dict, List, Optional, Set, Tuple
+import json
 
 import imageio.v2 as iio
 import numpy as np
 from bosdyn.client import math_helpers
 from matplotlib import pyplot as plt
+import PIL.Image
+import multiprocessing as mp
 
 from predicators import utils
 from predicators.envs import BaseEnv, get_or_create_env
 from predicators.envs.spot_env import HANDEMPTY_GRIPPER_THRESHOLD, \
     SpotCubeEnv, SpotRearrangementEnv, _drafting_table_type, \
-    _PartialPerceptionState, _SpotObservation, in_general_view_classifier
+    _PartialPerceptionState, _SpotObservation, in_general_view_classifier, _ALL_TYPES
 from predicators.perception.base_perceiver import BasePerceiver
 from predicators.settings import CFG
 from predicators.spot_utils.utils import _container_type, \
@@ -23,6 +26,9 @@
 from predicators.structs import Action, DefaultState, EnvironmentTask, \
     GoalDescription, GroundAtom, Object, Observation, Predicate, \
     SpotActionExtraInfo, State, Task, Video
+from predicators.spot_utils.perception.object_perception import vlm
+from predicators.spot_utils.perception.object_detection import detect_objects, \
+    ObjectDetectionID, LanguageObjectDetectionID, visualize_all_artifacts
 
 
 class SpotPerceiver(BasePerceiver):
@@ -45,7 +51,9 @@ def __init__(self) -> None:
             0, 0, 0, math_helpers.Quat())
         self._lost_objects: Set[Object] = set()
         self._curr_env: Optional[BaseEnv] = None
+        self._curr_observation: Optional[_SpotObservation] = None
         self._waiting_for_observation = True
+        self._novel_objects_with_query: Set[Tuple[Object, str]] = set()
         self._ordered_objects: List[Object] = []  # list of all known objects
         # Keep track of objects that are contained (out of view) in another
         # object, like a bag or bucket.
This is important not only for gremlins @@ -92,6 +100,7 @@ def update_perceiver_with_action(self, action: Action) -> None: self._prev_action = action def step(self, observation: Observation) -> State: + self._curr_observation = observation self._update_state_from_observation(observation) # Update the curr held item when applicable. assert self._curr_env is not None @@ -106,7 +115,8 @@ def step(self, observation: Observation) -> State: # operator! if "pick" in controller_name.lower(): if self._held_object is not None: - assert CFG.spot_run_dry + # assert CFG.spot_run_dry + pass else: # We know that the object that we attempted to grasp was # the second argument to the controller. @@ -170,6 +180,7 @@ def _update_state_from_observation(self, observation: Observation) -> None: assert isinstance(observation, _SpotObservation) # If a container is being updated, change the poses for contained # objects. + for container in observation.objects_in_view: if container not in self._container_to_contained_objects: continue @@ -229,6 +240,37 @@ def _create_state(self) -> State: "qz": self._robot_pos.rot.z, }, } + + ### TODO: add novel objects to state + # TODO (wmcclinton): get the novel objects from the observation or assert that they exist + # Note: the environment keeps track of its own novel objects to run its VLM predicates on + if len(self._novel_objects_with_query) > 0: + # logging.info(f"Perceiver: Novel objects with query: {self._novel_objects_with_query}") + detection_id_to_obj: Dict[ObjectDetectionID, Object] = {} + for obj, query in self._novel_objects_with_query: + detection_id_to_obj[LanguageObjectDetectionID(query)] = obj + + all_object_detection_ids = set(detection_id_to_obj) + all_detections, all_artifacts = detect_objects( + all_object_detection_ids, self._curr_observation.images, get_allowed_map_regions()) + + # if CFG.spot_render_perception_outputs: + # outdir = Path(CFG.spot_perception_outdir) + # time_str = time.strftime("%Y%m%d-%H%M%S") + # detections_outfile = outdir / f"new_obj_detections_{time_str}.png" + # no_detections_outfile = outdir / f"new_obj_no_detections_{time_str}.png" + # p1 = mp.Process(target=visualize_all_artifacts, args=(all_artifacts, detections_outfile, no_detections_outfile)) + # p1.start() + + all_objects_in_view = { + detection_id_to_obj[det_id]: val + for (det_id, val) in all_detections.items() + } + + for obj, pose in all_objects_in_view.items(): + self._known_object_poses[obj] = pose + ### + # Add new objects to the list of known objects. known_objs = set(self._ordered_objects) for obj in sorted(set(self._known_object_poses) - known_objs): @@ -245,6 +287,15 @@ def _create_state(self) -> State: "qz": pose.rot.z, "object_id": object_id, } + if obj.type is _container_type: + state_dict[obj]["shape"] = 2 + state_dict[obj]["height"] = 0.1 + state_dict[obj]["length"] = 0.1 + state_dict[obj]["width"] = 0.1 + state_dict[obj]["placeable"] = 1 + state_dict[obj]["is_sweeper"] = 0 + state_dict[obj]["radius"] = 0.1 + # Add static object features. static_feats = self._static_object_features.get(obj.name, {}) state_dict[obj].update(static_feats) @@ -311,7 +362,6 @@ def _create_state(self) -> State: def _create_goal(self, state: State, goal_description: GoalDescription) -> Set[GroundAtom]: - del state # not used # Unfortunate hack to deal with the fact that the state is actually # not yet set. Hopefully one day other cleanups will enable cleaning. 
assert self._curr_env is not None @@ -493,16 +543,16 @@ def _create_goal(self, state: State, return { GroundAtom(Inside, [block, bowl]), } - if goal_description == "put the red block into the plastic bin on floor": + if goal_description == "put the red block into the cardboard box on floor": block = Object("red_block", _movable_object_type) - bin = Object("plastic_bin", _container_type) + bin = Object("cardboard_box", _container_type) Inside = pred_name_to_pred["Inside"] return { GroundAtom(Inside, [block, bin]), } if goal_description == "put the red block into the box on floor": block = Object("red_block", _movable_object_type) - bin = Object("plastic_bin", _container_type) + bin = Object("cardboard_box", _container_type) Inside = pred_name_to_pred["Inside"] return { GroundAtom(Inside, [block, bin]), @@ -521,9 +571,9 @@ def _create_goal(self, state: State, return { GroundAtom(Inside, [block, bowl]), } - if goal_description == "put the red block on table into the plastic bin on floor": + if goal_description == "put the red block on table into the cardboard box on floor": block = Object("red_block", _movable_object_type) - bin = Object("plastic_bin", _container_type) + bin = Object("cardboard_box", _container_type) Inside = pred_name_to_pred["Inside"] return { GroundAtom(Inside, [block, bin]), @@ -550,30 +600,33 @@ def _create_goal(self, state: State, if goal_description == "know container not as empty": # container = Object("container", _container_type) cup = Object("cup", _container_type) - ContainingWaterKnown = pred_name_to_pred["ContainingWaterKnown"] - ContainingWater = pred_name_to_pred["ContainingWater"] + ContainingFoodKnown = pred_name_to_pred["ContainingFoodKnown"] + ContainingFood = pred_name_to_pred["ContainingFood"] return { - GroundAtom(ContainingWaterKnown, [cup]), - GroundAtom(ContainingWater, [cup]), + GroundAtom(ContainingFoodKnown, [cup]), + GroundAtom(ContainingFood, [cup]), } if goal_description == "place empty cup into the box": cup = Object("cup", _container_type) cardboard_box = Object("cardboard_box", _container_type) - ContainingWaterKnown = pred_name_to_pred["ContainingWaterKnown"] - NotContainingWater = pred_name_to_pred["NotContainingWater"] + ContainingFoodKnown = pred_name_to_pred["ContainingFoodKnown"] + NotContainingFood = pred_name_to_pred["NotContainingFood"] Inside = pred_name_to_pred["Inside"] - return { - GroundAtom(ContainingWaterKnown, [cup]), - GroundAtom(NotContainingWater, [cup]), - GroundAtom(Inside, [cup, cardboard_box]), - } + if state.data == {}: + return { + #GroundAtom(ContainingFoodKnown, [cup]), + #GroundAtom(NotContainingFood, [cup]), + GroundAtom(Inside, [cup, cardboard_box]), + } + object_name_to_object = {} + return self._parse_vlm_goal_from_state(state, goal_description, object_name_to_object) if goal_description == "know container as empty": cup = Object("cup", _container_type) - ContainingWaterKnown = pred_name_to_pred["ContainingWaterKnown"] - NotContainingWater = pred_name_to_pred["NotContainingWater"] + ContainingFoodKnown = pred_name_to_pred["ContainingFoodKnown"] + NotContainingFood = pred_name_to_pred["NotContainingFood"] return { - GroundAtom(ContainingWaterKnown, [cup]), - GroundAtom(NotContainingWater, [cup]), + GroundAtom(ContainingFoodKnown, [cup]), + GroundAtom(NotContainingFood, [cup]), } if goal_description == "put the cup into the cardboard box on floor": cup = Object("cup", _container_type) @@ -695,3 +748,80 @@ def render_mental_images(self, observation: Observation, logging.info(f"Wrote out to {outfile}") 
            plt.close()
         return [img]
+
+    def _get_language_goal_prompt_prefix(self,
+                                         object_names: Collection[str]) -> str:
+        # pylint:disable=line-too-long
+        available_predicates = ", ".join(sorted(pred.pretty_str()[1] for pred in self._curr_env.goal_predicates))
+        available_object_types = ", ".join(sorted(t.name for t in _ALL_TYPES))
+        # We could extract the object names, but this is simpler.
+        prompt = f"""# The available predicates are: {available_predicates}
+# The available object types are: {available_object_types}
+# Use the available predicates and object types to convert natural language goals into JSON goals.
+
+# I want a sandwich with a patty, cheese, and lettuce, and get ready to give me a glass of milk.
+{{"Holding": [["robot", "milk"]], "On": [["bread0", "board"], ["bread1", "lettuce0"], ["lettuce0", "cheese0"], ["cheese0", "patty0"], ["patty0", "bread0"]]}}
+"""
+        return prompt
+
+    def _get_vlm_goal_prompt_prefix(self,
+                                    object_names: Collection[str]) -> str:
+        # pylint:disable=line-too-long
+        available_predicates = ", ".join(sorted(pred.pretty_str()[1] for pred in self._curr_env.goal_predicates if 'Inside(' in pred.pretty_str()[1]))
+        available_object_types = ", ".join(sorted(t.name for t in _ALL_TYPES))
+        # We could extract the object names, but this is simpler.
+        prompt = f"""# The available predicates are: {available_predicates}
+# The available object types are: {available_object_types}
+# Use the available predicates and object types to convert natural language goals into JSON goals.
+
+# I want a sandwich with a patty, cheese, and lettuce, and get ready to give me a glass of milk.
+{{"Holding": [["robot", "milk"]], "On": [["bread0", "board"], ["bread1", "lettuce0"], ["lettuce0", "cheese0"], ["cheese0", "patty0"], ["patty0", "bread0"]]}}
+"""
+        return prompt
+
+    def _parse_vlm_goal_from_state(
+            self, state: State, language_goal: str,
+            id_to_obj: Dict[str, Object]) -> Set[GroundAtom]:
+        """Helper for parsing language-based goals from JSON task specs."""
+        ### TODO: temporary hardcoded goal for testing (makes the VLM query below unreachable)
+        language_goal = 'place two empty cups into the cardboard box'
+        pred_name_to_pred = {p.name: p for p in self._curr_env.predicates}
+        Inside = pred_name_to_pred["Inside"]
+        return {GroundAtom(Inside, [Object("cup", _container_type), Object("cardboard_box", _container_type)]),
+                GroundAtom(Inside, [Object("cup1", _container_type), Object("cardboard_box", _container_type)])}
+        ###
+        object_names = set(id_to_obj)
+        prompt_prefix = self._get_vlm_goal_prompt_prefix(object_names)
+        prompt = prompt_prefix + f"\n# {language_goal}"
+        image_list = [
+            PIL.Image.fromarray(v.rotated_rgb) for v in state.camera_images.values()
+        ]
+        responses = vlm.sample_completions(
+            prompt=prompt,
+            imgs=image_list,
+            temperature=0.1,
+            seed=int(time.time()),
+            num_completions=1,
+        )
+        response = responses[0]
+        # Currently assumes that the VLM is perfect. In the future, will need
+        # to handle various errors and perhaps query the VLM for multiple
+        # responses until we find one that can be parsed.
+ try: + goal_spec = json.loads(response) + except json.JSONDecodeError as e: + goal_spec = json.loads(response.replace('`', '').replace('json', '')) + + for pred, args in goal_spec.items(): + for arg in args: + for obj_name in arg: + if 'robot' in obj_name: + id_to_obj[obj_name] = Object(obj_name, _robot_type) + elif any([name in obj_name for name in ['cup', 'box', 'bin']]): + id_to_obj[obj_name] = Object(obj_name, _container_type) + else: + id_to_obj[obj_name] = Object(obj_name, _movable_object_type) + + new_goal = self._curr_env._parse_goal_from_json(goal_spec, id_to_obj) + return new_goal + diff --git a/predicators/planning.py b/predicators/planning.py index efa47defb6..62d0a3eb85 100644 --- a/predicators/planning.py +++ b/predicators/planning.py @@ -1138,7 +1138,6 @@ def _sesame_plan_with_fast_downward( sas_file = generate_sas_file_for_fd(task, nsrts, predicates, types, timeout, timeout_cmd, alias_flag, exec_str, objects, init_atoms) - while True: skeleton, atoms_sequence, metrics = fd_plan_from_sas_file( sas_file, timeout_cmd, timeout, exec_str, alias_flag, start_time, diff --git a/predicators/spot_utils/utils.py b/predicators/spot_utils/utils.py index dfa9381700..940b615b67 100644 --- a/predicators/spot_utils/utils.py +++ b/predicators/spot_utils/utils.py @@ -47,6 +47,11 @@ DEFAULT_HAND_POST_DUMP_POSE = math_helpers.SE3Pose( x=0.80, y=0.0, z=0.25, rot=math_helpers.Quat.from_pitch(np.pi / 2)) DEFAULT_SIM_ROBOT_Z_OFFSET = 0.6 +DEFAULT_HAND_LOOK_FROM_TOP = math_helpers.SE3Pose(x=0.80, + y=0.0, + z=0.1, + rot=math_helpers.Quat.from_pitch( + 5*np.pi / 12)) # Spot-specific types. diff --git a/problem.pddl b/problem.pddl new file mode 100644 index 0000000000..a55ad0cf77 --- /dev/null +++ b/problem.pddl @@ -0,0 +1,24 @@ +(define (problem myproblem) (:domain mydomain) + (:objects + block0 - block + block1 - block + block2 - block + block3 - block + block4 - block + robby - robot + ) + (:init + (Clear block4) + (GripperOpen robby) + (On block1 block0) + (On block2 block1) + (On block3 block2) + (On block4 block3) + (OnTable block0) + ) + (:goal (or (On block1 block0) + (On block2 block1) + (On block4 block3) + (OnTable block0) + (OnTable block3))) +) \ No newline at end of file diff --git a/right_fisheye_image.png b/right_fisheye_image.png new file mode 100644 index 0000000000..618054e5f3 Binary files /dev/null and b/right_fisheye_image.png differ diff --git a/sas.txt b/sas.txt new file mode 100644 index 0000000000..3645a58929 --- /dev/null +++ b/sas.txt @@ -0,0 +1,701 @@ +begin_version +3 +end_version +begin_metric +0 +end_metric +12 +begin_variable +var0 +-1 +6 +Atom holding(block0) +Atom on(block0, block1) +Atom on(block0, block2) +Atom on(block0, block3) +Atom on(block0, block4) +Atom ontable(block0) +end_variable +begin_variable +var1 +-1 +6 +Atom holding(block1) +Atom on(block1, block0) +Atom on(block1, block2) +Atom on(block1, block3) +Atom on(block1, block4) +Atom ontable(block1) +end_variable +begin_variable +var2 +-1 +6 +Atom holding(block2) +Atom on(block2, block0) +Atom on(block2, block1) +Atom on(block2, block3) +Atom on(block2, block4) +Atom ontable(block2) +end_variable +begin_variable +var3 +-1 +6 +Atom holding(block3) +Atom on(block3, block0) +Atom on(block3, block1) +Atom on(block3, block2) +Atom on(block3, block4) +Atom ontable(block3) +end_variable +begin_variable +var4 +-1 +6 +Atom holding(block4) +Atom on(block4, block0) +Atom on(block4, block1) +Atom on(block4, block2) +Atom on(block4, block3) +Atom ontable(block4) +end_variable +begin_variable +var5 +-1 +2 +Atom 
clear(block0) +NegatedAtom clear(block0) +end_variable +begin_variable +var6 +-1 +2 +Atom clear(block1) +NegatedAtom clear(block1) +end_variable +begin_variable +var7 +-1 +2 +Atom clear(block2) +NegatedAtom clear(block2) +end_variable +begin_variable +var8 +-1 +2 +Atom clear(block3) +NegatedAtom clear(block3) +end_variable +begin_variable +var9 +-1 +2 +Atom clear(block4) +NegatedAtom clear(block4) +end_variable +begin_variable +var10 +-1 +2 +Atom gripperopen(robby) +NegatedAtom gripperopen(robby) +end_variable +begin_variable +var11 +0 +2 +Atom new-axiom@0() +NegatedAtom new-axiom@0() +end_variable +6 +begin_mutex_group +6 +5 0 +0 0 +1 1 +2 1 +3 1 +4 1 +end_mutex_group +begin_mutex_group +6 +6 0 +0 1 +1 0 +2 2 +3 2 +4 2 +end_mutex_group +begin_mutex_group +6 +7 0 +0 2 +1 2 +2 0 +3 3 +4 3 +end_mutex_group +begin_mutex_group +6 +8 0 +0 3 +1 3 +2 3 +3 0 +4 4 +end_mutex_group +begin_mutex_group +6 +9 0 +0 4 +1 4 +2 4 +3 4 +4 0 +end_mutex_group +begin_mutex_group +6 +10 0 +0 0 +1 0 +2 0 +3 0 +4 0 +end_mutex_group +begin_state +5 +1 +2 +3 +4 +1 +1 +1 +1 +0 +0 +1 +end_state +begin_goal +1 +11 0 +end_goal +50 +begin_operator +pickfromtable block0 robby +0 +3 +0 5 0 1 +0 10 0 1 +0 0 5 0 +1 +end_operator +begin_operator +pickfromtable block1 robby +0 +3 +0 6 0 1 +0 10 0 1 +0 1 5 0 +1 +end_operator +begin_operator +pickfromtable block2 robby +0 +3 +0 7 0 1 +0 10 0 1 +0 2 5 0 +1 +end_operator +begin_operator +pickfromtable block3 robby +0 +3 +0 8 0 1 +0 10 0 1 +0 3 5 0 +1 +end_operator +begin_operator +pickfromtable block4 robby +0 +3 +0 9 0 1 +0 10 0 1 +0 4 5 0 +1 +end_operator +begin_operator +putontable block0 robby +0 +3 +0 5 -1 0 +0 10 -1 0 +0 0 0 5 +1 +end_operator +begin_operator +putontable block1 robby +0 +3 +0 6 -1 0 +0 10 -1 0 +0 1 0 5 +1 +end_operator +begin_operator +putontable block2 robby +0 +3 +0 7 -1 0 +0 10 -1 0 +0 2 0 5 +1 +end_operator +begin_operator +putontable block3 robby +0 +3 +0 8 -1 0 +0 10 -1 0 +0 3 0 5 +1 +end_operator +begin_operator +putontable block4 robby +0 +3 +0 9 -1 0 +0 10 -1 0 +0 4 0 5 +1 +end_operator +begin_operator +stack block0 block1 robby +0 +4 +0 5 -1 0 +0 6 0 1 +0 10 -1 0 +0 0 0 1 +1 +end_operator +begin_operator +stack block0 block2 robby +0 +4 +0 5 -1 0 +0 7 0 1 +0 10 -1 0 +0 0 0 2 +1 +end_operator +begin_operator +stack block0 block3 robby +0 +4 +0 5 -1 0 +0 8 0 1 +0 10 -1 0 +0 0 0 3 +1 +end_operator +begin_operator +stack block0 block4 robby +0 +4 +0 5 -1 0 +0 9 0 1 +0 10 -1 0 +0 0 0 4 +1 +end_operator +begin_operator +stack block1 block0 robby +0 +4 +0 5 0 1 +0 6 -1 0 +0 10 -1 0 +0 1 0 1 +1 +end_operator +begin_operator +stack block1 block2 robby +0 +4 +0 6 -1 0 +0 7 0 1 +0 10 -1 0 +0 1 0 2 +1 +end_operator +begin_operator +stack block1 block3 robby +0 +4 +0 6 -1 0 +0 8 0 1 +0 10 -1 0 +0 1 0 3 +1 +end_operator +begin_operator +stack block1 block4 robby +0 +4 +0 6 -1 0 +0 9 0 1 +0 10 -1 0 +0 1 0 4 +1 +end_operator +begin_operator +stack block2 block0 robby +0 +4 +0 5 0 1 +0 7 -1 0 +0 10 -1 0 +0 2 0 1 +1 +end_operator +begin_operator +stack block2 block1 robby +0 +4 +0 6 0 1 +0 7 -1 0 +0 10 -1 0 +0 2 0 2 +1 +end_operator +begin_operator +stack block2 block3 robby +0 +4 +0 7 -1 0 +0 8 0 1 +0 10 -1 0 +0 2 0 3 +1 +end_operator +begin_operator +stack block2 block4 robby +0 +4 +0 7 -1 0 +0 9 0 1 +0 10 -1 0 +0 2 0 4 +1 +end_operator +begin_operator +stack block3 block0 robby +0 +4 +0 5 0 1 +0 8 -1 0 +0 10 -1 0 +0 3 0 1 +1 +end_operator +begin_operator +stack block3 block1 robby +0 +4 +0 6 0 1 +0 8 -1 0 +0 10 -1 0 +0 3 0 2 +1 +end_operator +begin_operator 
+stack block3 block2 robby +0 +4 +0 7 0 1 +0 8 -1 0 +0 10 -1 0 +0 3 0 3 +1 +end_operator +begin_operator +stack block3 block4 robby +0 +4 +0 8 -1 0 +0 9 0 1 +0 10 -1 0 +0 3 0 4 +1 +end_operator +begin_operator +stack block4 block0 robby +0 +4 +0 5 0 1 +0 9 -1 0 +0 10 -1 0 +0 4 0 1 +1 +end_operator +begin_operator +stack block4 block1 robby +0 +4 +0 6 0 1 +0 9 -1 0 +0 10 -1 0 +0 4 0 2 +1 +end_operator +begin_operator +stack block4 block2 robby +0 +4 +0 7 0 1 +0 9 -1 0 +0 10 -1 0 +0 4 0 3 +1 +end_operator +begin_operator +stack block4 block3 robby +0 +4 +0 8 0 1 +0 9 -1 0 +0 10 -1 0 +0 4 0 4 +1 +end_operator +begin_operator +unstack block0 block1 robby +0 +4 +0 5 0 1 +0 6 -1 0 +0 10 0 1 +0 0 1 0 +1 +end_operator +begin_operator +unstack block0 block2 robby +0 +4 +0 5 0 1 +0 7 -1 0 +0 10 0 1 +0 0 2 0 +1 +end_operator +begin_operator +unstack block0 block3 robby +0 +4 +0 5 0 1 +0 8 -1 0 +0 10 0 1 +0 0 3 0 +1 +end_operator +begin_operator +unstack block0 block4 robby +0 +4 +0 5 0 1 +0 9 -1 0 +0 10 0 1 +0 0 4 0 +1 +end_operator +begin_operator +unstack block1 block0 robby +0 +4 +0 5 -1 0 +0 6 0 1 +0 10 0 1 +0 1 1 0 +1 +end_operator +begin_operator +unstack block1 block2 robby +0 +4 +0 6 0 1 +0 7 -1 0 +0 10 0 1 +0 1 2 0 +1 +end_operator +begin_operator +unstack block1 block3 robby +0 +4 +0 6 0 1 +0 8 -1 0 +0 10 0 1 +0 1 3 0 +1 +end_operator +begin_operator +unstack block1 block4 robby +0 +4 +0 6 0 1 +0 9 -1 0 +0 10 0 1 +0 1 4 0 +1 +end_operator +begin_operator +unstack block2 block0 robby +0 +4 +0 5 -1 0 +0 7 0 1 +0 10 0 1 +0 2 1 0 +1 +end_operator +begin_operator +unstack block2 block1 robby +0 +4 +0 6 -1 0 +0 7 0 1 +0 10 0 1 +0 2 2 0 +1 +end_operator +begin_operator +unstack block2 block3 robby +0 +4 +0 7 0 1 +0 8 -1 0 +0 10 0 1 +0 2 3 0 +1 +end_operator +begin_operator +unstack block2 block4 robby +0 +4 +0 7 0 1 +0 9 -1 0 +0 10 0 1 +0 2 4 0 +1 +end_operator +begin_operator +unstack block3 block0 robby +0 +4 +0 5 -1 0 +0 8 0 1 +0 10 0 1 +0 3 1 0 +1 +end_operator +begin_operator +unstack block3 block1 robby +0 +4 +0 6 -1 0 +0 8 0 1 +0 10 0 1 +0 3 2 0 +1 +end_operator +begin_operator +unstack block3 block2 robby +0 +4 +0 7 -1 0 +0 8 0 1 +0 10 0 1 +0 3 3 0 +1 +end_operator +begin_operator +unstack block3 block4 robby +0 +4 +0 8 0 1 +0 9 -1 0 +0 10 0 1 +0 3 4 0 +1 +end_operator +begin_operator +unstack block4 block0 robby +0 +4 +0 5 -1 0 +0 9 0 1 +0 10 0 1 +0 4 1 0 +1 +end_operator +begin_operator +unstack block4 block1 robby +0 +4 +0 6 -1 0 +0 9 0 1 +0 10 0 1 +0 4 2 0 +1 +end_operator +begin_operator +unstack block4 block2 robby +0 +4 +0 7 -1 0 +0 9 0 1 +0 10 0 1 +0 4 3 0 +1 +end_operator +begin_operator +unstack block4 block3 robby +0 +4 +0 8 -1 0 +0 9 0 1 +0 10 0 1 +0 4 4 0 +1 +end_operator +5 +begin_rule +1 +0 5 +11 1 0 +end_rule +begin_rule +1 +1 1 +11 1 0 +end_rule +begin_rule +1 +2 2 +11 1 0 +end_rule +begin_rule +1 +3 5 +11 1 0 +end_rule +begin_rule +1 +4 4 +11 1 0 +end_rule
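
A note on the planning artifacts above: domain.pddl and problem.pddl are the task encoding, and sas.txt is Fast Downward's translator output for them, the same pipeline that planning.py drives via generate_sas_file_for_fd and fd_plan_from_sas_file. As a minimal standalone sketch, assuming a local Fast Downward checkout at FD_PATH (not part of this diff), the same files can be translated and solved directly:

import subprocess

FD_PATH = "./downward/fast-downward.py"  # assumed install location

def plan_blocks(domain="domain.pddl", problem="problem.pddl"):
    # Translate and search in one call. --sas-file keeps the intermediate
    # SAS+ encoding (the format shown in sas.txt above); the lama-first
    # alias returns the first satisficing plan it finds.
    subprocess.run(
        [FD_PATH, "--sas-file", "sas.txt", "--plan-file", "plan.txt",
         "--alias", "lama-first", domain, problem],
        check=True)
    with open("plan.txt", encoding="utf-8") as f:
        # Plan lines look like "(unstack block4 block3 robby)"; the
        # trailing "; cost = ..." line is a comment.
        return [ln.strip() for ln in f if not ln.startswith(";")]

if __name__ == "__main__":
    for step in plan_blocks():
        print(step)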
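Similarly, the goal-to-state object matching added in expected_atoms_monitor.py greedily assigns each goal object to the Levenshtein-closest state object and mints a numbered "virtual" duplicate when one state object is claimed twice. A self-contained sketch of just that behavior (Obj here is a stand-in for predicators.structs.Object; the real code iterates over the objects of each ground atom in the goal):

import jellyfish

class Obj:
    """Stand-in for predicators.structs.Object (name only)."""
    def __init__(self, name: str) -> None:
        self.name = name
    def __repr__(self) -> str:
        return self.name

def map_goal_to_state(goal_names, state_objs):
    mapping = {}
    used = {o: 0 for o in state_objs}
    for g in goal_names:
        # Pick the state object whose name is closest in edit distance.
        best = min(state_objs,
                   key=lambda o: jellyfish.levenshtein_distance(g, o.name))
        if used[best]:
            # Already claimed: mint a numbered duplicate, e.g. "cup" -> "cup1".
            mapping[g] = Obj(f"{best.name}{used[best]}")
        else:
            mapping[g] = best
        used[best] += 1
    return mapping

state = [Obj("cup"), Obj("cardboard_box")]
print(map_goal_to_state(["cup", "cup1", "cardboard box"], state))
# {'cup': cup, 'cup1': cup1, 'cardboard box': cardboard_box}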