# libero_plus_lerobot/downsampling.py
"""
Downsample LeRobot v2.x local dataset by episode (default 10%) and save to a new directory.
This script does not rely on HuggingFace's filter/save_to_disk; it directly utilizes the disk layout of the LeRobot dataset:
- meta/episodes.jsonl One episode per line (contains episode_index and length)
- meta/episodes_stats.jsonl Statistics for one episode per line (contains episode_index)
- data/chunk-XXX/episode_XXXXXX.parquet
- videos/chunk-XXX/<video_key>/episode_XXXXXX.mp4
"""
from __future__ import annotations
import argparse
import json
import math
import random
import shutil
from pathlib import Path
from typing import Dict, Iterable, List, Sequence, Set, Tuple

def _read_jsonl(path: Path) -> Iterable[dict]:
with path.open("r", encoding="utf-8") as f:
for line in f:
line = line.strip()
if not line:
continue
yield json.loads(line)

def _write_jsonl(path: Path, rows: Iterable[dict]) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
with path.open("w", encoding="utf-8") as f:
for r in rows:
f.write(json.dumps(r, ensure_ascii=False) + "\n")

def _copy_file(src: Path, dst: Path) -> None:
dst.parent.mkdir(parents=True, exist_ok=True)
shutil.copy2(src, dst)

def _detect_video_keys(videos_dir: Path) -> List[str]:
"""
Read subdirectory names under videos/chunk-000 as the list of video_keys.
Example: ["observation.images.front", "observation.images.wrist"]
"""
chunk0 = videos_dir / "chunk-000"
if not chunk0.exists():
return []
keys: List[str] = []
for p in chunk0.iterdir():
if p.is_dir():
keys.append(p.name)
keys.sort()
return keys

def _sample_episode_indices_from_rows(
episodes_rows: Sequence[dict],
fraction: float,
seed: int,
) -> Tuple[List[int], Dict[int, int]]:
"""Sample from rows of episodes.jsonl.
Returns:
- List of selected old episode_indices (ascending)
- Mapping of old episode_index -> length (containing only selected episodes)
"""
if not episodes_rows:
raise RuntimeError("episodes_rows is empty, cannot sample")
episode_ids = [int(r["episode_index"]) for r in episodes_rows]
lengths_all = {int(r["episode_index"]): int(r["length"]) for r in episodes_rows}
total = len(episode_ids)
k = int(math.floor(total * fraction))
k = max(1, min(total, k))
rng = random.Random(seed)
chosen = rng.sample(episode_ids, k=k)
chosen.sort()
lengths = {ep: lengths_all[ep] for ep in chosen}
return chosen, lengths
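
# A worked example of the sampling above: with 500 episodes and fraction=0.1,
# k = floor(500 * 0.1) = 50. k is then clamped to [1, total], so fraction=0.0
# still keeps one episode and fraction=1.0 keeps them all.
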
def _rewrite_episode_index_in_parquet(src_parquet: Path, dst_parquet: Path, new_episode_index: int) -> None:
"""Write a new parquet file and replace the episode_index column with new_episode_index."""
try:
import pyarrow as pa # type: ignore
import pyarrow.parquet as pq # type: ignore
except Exception as e: # pragma: no cover
raise RuntimeError(
"pyarrow is required to rewrite episode_index in parquet. "
"Please install pyarrow in the current environment (usually included in LeRobot/datasets)."
) from e
table = pq.read_table(src_parquet)
if "episode_index" not in table.column_names:
raise KeyError(f"{src_parquet} missing episode_index column, cannot reindex")
col_idx = table.schema.get_field_index("episode_index")
new_col = pa.array([int(new_episode_index)] * table.num_rows, type=table.schema.field(col_idx).type)
table = table.set_column(col_idx, "episode_index", new_col)
dst_parquet.parent.mkdir(parents=True, exist_ok=True)
pq.write_table(table, dst_parquet)
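
# For illustration: if old episode 137 is the 4th episode kept (new index 3), every
# row of its parquet copy gets episode_index = 3; all other columns are unchanged.
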
def _filter_jsonl_by_episode_index(
src_path: Path,
dst_path: Path,
keep: Set[int],
) -> None:
def gen() -> Iterable[dict]:
for row in _read_jsonl(src_path):
if int(row.get("episode_index", -1)) in keep:
yield row
_write_jsonl(dst_path, gen())

def _update_info_json(
src_info_path: Path,
dst_info_path: Path,
selected_episodes: Sequence[int],
selected_lengths: Dict[int, int],
video_keys: Sequence[str],
) -> None:
info = json.loads(src_info_path.read_text(encoding="utf-8"))
total_episodes = len(selected_episodes)
total_frames = int(sum(selected_lengths.get(ep, 0) for ep in selected_episodes))
    # Keep chunks_size and the path templates unchanged; only update the totals and split ranges
info["total_episodes"] = total_episodes
info["total_frames"] = total_frames
if "splits" in info and isinstance(info["splits"], dict):
info["splits"]["train"] = f"0:{total_episodes}"
    # total_videos is usually episodes * video_keys; if videos are not copied
    # (video_keys is empty), set it to 0 to avoid inconsistency.
info["total_videos"] = int(total_episodes * len(video_keys)) if video_keys else 0
# total_chunks: Max chunk covered by selected episodes + 1 (at least 1)
chunks_size = int(info.get("chunks_size", 1000))
max_chunk = max(int(ep) // chunks_size for ep in selected_episodes) if selected_episodes else 0
info["total_chunks"] = int(max_chunk + 1)
dst_info_path.parent.mkdir(parents=True, exist_ok=True)
dst_info_path.write_text(json.dumps(info, ensure_ascii=False, indent=4) + "\n", encoding="utf-8")
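
# Illustrative fragment of the info.json fields updated above (values hypothetical;
# other keys such as fps, features, and the path templates are left untouched):
#   {
#       "total_episodes": 50,
#       "total_frames": 12345,
#       "total_videos": 100,
#       "total_chunks": 1,
#       "chunks_size": 1000,
#       "splits": {"train": "0:50"}
#   }
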
def downsample_episodes(
repo_id: Path,
out_repo_id: Path,
fraction: float = 0.1,
seed: int = 0,
copy_videos: bool = True,
overwrite: bool = False,
reindex_episodes: bool = True,
) -> None:
repo_id = repo_id.resolve()
out_repo_id = out_repo_id.resolve()
if out_repo_id.exists():
if not overwrite:
raise FileExistsError(f"Output directory already exists: {out_repo_id} (use --overwrite to overwrite)")
shutil.rmtree(out_repo_id)
# Check necessary files
src_meta = repo_id / "meta"
src_data = repo_id / "data"
src_videos = repo_id / "videos"
src_info = src_meta / "info.json"
src_episodes = src_meta / "episodes.jsonl"
src_stats = src_meta / "episodes_stats.jsonl"
for p in (src_info, src_episodes, src_data):
if not p.exists():
raise FileNotFoundError(f"Missing necessary path: {p}")
    # Read episodes.jsonl and sample (the selection is expressed in old episode_index values)
episodes_rows = list(_read_jsonl(src_episodes))
selected_old, lengths_old = _sample_episode_indices_from_rows(episodes_rows, fraction=fraction, seed=seed)
keep_old_set = set(selected_old)
    # Critical: by default LeRobotDataset iterates range(total_episodes) to generate file
    # paths, so for the new directory to load directly as a repo_id, episode indices must
    # be contiguous 0..N-1 and the episode_index column inside each parquet must match.
if reindex_episodes:
old_to_new = {old_ep: new_ep for new_ep, old_ep in enumerate(selected_old)}
selected_new = list(range(len(selected_old)))
lengths_new = {old_to_new[old_ep]: lengths_old[old_ep] for old_ep in selected_old}
else:
old_to_new = {old_ep: old_ep for old_ep in selected_old}
selected_new = selected_old
lengths_new = lengths_old
# Read chunks_size / video_keys
info = json.loads(src_info.read_text(encoding="utf-8"))
chunks_size = int(info.get("chunks_size", 1000))
video_keys = _detect_video_keys(src_videos) if copy_videos else []
# Output directory skeleton
(out_repo_id / "meta").mkdir(parents=True, exist_ok=True)
(out_repo_id / "data").mkdir(parents=True, exist_ok=True)
if copy_videos:
(out_repo_id / "videos").mkdir(parents=True, exist_ok=True)
# Generate meta/episodes.jsonl (output by new episode_index, ensuring 0..N-1)
episodes_by_old = {int(r["episode_index"]): r for r in episodes_rows}
def gen_new_episodes_jsonl() -> Iterable[dict]:
for old_ep in selected_old:
r = dict(episodes_by_old[old_ep])
r["episode_index"] = int(old_to_new[old_ep])
yield r
_write_jsonl(out_repo_id / "meta" / "episodes.jsonl", gen_new_episodes_jsonl())
# Generate meta/episodes_stats.jsonl (also reindex, and fix internal stats['episode_index'])
if src_stats.exists():
stats_by_old: Dict[int, dict] = {}
for row in _read_jsonl(src_stats):
old_ep = int(row.get("episode_index", -1))
if old_ep in keep_old_set:
stats_by_old[old_ep] = row
def gen_new_stats_jsonl() -> Iterable[dict]:
for old_ep in selected_old:
row = dict(stats_by_old[old_ep])
new_ep = int(old_to_new[old_ep])
row["episode_index"] = new_ep
st = row.get("stats")
if isinstance(st, dict) and "episode_index" in st and isinstance(st["episode_index"], dict):
epst = dict(st["episode_index"])
epst["min"] = [new_ep]
epst["max"] = [new_ep]
epst["mean"] = [float(new_ep)]
epst["std"] = [0.0]
st["episode_index"] = epst
row["stats"] = st
yield row
_write_jsonl(out_repo_id / "meta" / "episodes_stats.jsonl", gen_new_stats_jsonl())
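    # Shape of a stats row assumed above (a sketch; real rows also carry stats for
    # every other feature, not just episode_index):
    #   {"episode_index": 3,
    #    "stats": {"episode_index": {"min": [3], "max": [3], "mean": [3.0], "std": [0.0]}}}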
# Copy tasks.jsonl as is (works fine without trimming)
src_tasks = src_meta / "tasks.jsonl"
if src_tasks.exists():
_copy_file(src_tasks, out_repo_id / "meta" / "tasks.jsonl")
_update_info_json(
src_info_path=src_info,
dst_info_path=out_repo_id / "meta" / "info.json",
selected_episodes=selected_new,
selected_lengths=lengths_new,
video_keys=video_keys,
)
# Optional: Copy README / .gitattributes (doesn't affect loading, but good for management)
for extra in (repo_id / "README.md", repo_id / ".gitattributes"):
if extra.exists():
_copy_file(extra, out_repo_id / extra.name)
# Copy/rewrite data / videos files
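    # Worked path example (chunks_size=1000): old episode 1234 lives at
    # data/chunk-001/episode_001234.parquet; reindexed to 56, it is written to
    # data/chunk-000/episode_000056.parquet, and its videos move the same way.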
for old_ep in selected_old:
new_ep = int(old_to_new[old_ep])
old_chunk = int(old_ep) // chunks_size
new_chunk = int(new_ep) // chunks_size
old_name = f"episode_{int(old_ep):06d}"
new_name = f"episode_{int(new_ep):06d}"
src_parquet = src_data / f"chunk-{old_chunk:03d}" / f"{old_name}.parquet"
dst_parquet = out_repo_id / "data" / f"chunk-{new_chunk:03d}" / f"{new_name}.parquet"
if not src_parquet.exists():
raise FileNotFoundError(f"Parquet not found: {src_parquet}")
if reindex_episodes:
_rewrite_episode_index_in_parquet(src_parquet, dst_parquet, new_episode_index=new_ep)
else:
_copy_file(src_parquet, dst_parquet)
if copy_videos and video_keys:
for key in video_keys:
src_mp4 = src_videos / f"chunk-{old_chunk:03d}" / key / f"{old_name}.mp4"
if src_mp4.exists():
dst_mp4 = out_repo_id / "videos" / f"chunk-{new_chunk:03d}" / key / f"{new_name}.mp4"
_copy_file(src_mp4, dst_mp4)
print(
"Downsampling complete:\n"
f"- Input: {repo_id}\n"
f"- Output: {out_repo_id}\n"
f"- Selected episodes: {len(selected_old)} / {json.loads(src_info.read_text(encoding='utf-8')).get('total_episodes', 'unknown')}\n"
f"- reindex_episodes: {reindex_episodes}\n"
f"- copy_videos: {copy_videos} (video_keys={video_keys})"
)
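
# Programmatic use, equivalent to the CLI entry point below (paths illustrative):
#   downsample_episodes(Path("/data/src_dataset"), Path("/data/src_dataset_5pct"),
#                       fraction=0.05, seed=42)
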
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--repo_id", type=str, default="/workspace/ghsun/libero_plus_lerobot", help="Input dataset directory")
parser.add_argument(
"--out_repo_id",
type=str,
default="/workspace/ghsun/libero_plus_lerobot_10pct",
help="Output dataset directory",
)
parser.add_argument("--fraction", type=float, default=0.1, help="Fraction of episodes to sample (default 0.1)")
parser.add_argument("--seed", type=int, default=0, help="Random seed (for reproducibility)")
parser.add_argument("--no_videos", action="store_true", help="Do not copy videos (only copy parquet and meta)")
parser.add_argument(
"--keep_original_episode_index",
action="store_true",
help="Keep original episode_index (do not reindex). Note: New directory cannot be loaded directly as repo_id, must pass episodes=original_index_list when creating LeRobotDataset.",
)
parser.add_argument("--overwrite", action="store_true", help="Delete and recreate if output directory exists")
args = parser.parse_args()
downsample_episodes(
repo_id=Path(args.repo_id),
out_repo_id=Path(args.out_repo_id),
fraction=args.fraction,
seed=args.seed,
copy_videos=not args.no_videos,
overwrite=args.overwrite,
reindex_episodes=not args.keep_original_episode_index,
)

if __name__ == "__main__":
main()