Add support to load from any HF dataset and CSV #379

Merged · 3 commits · Feb 13, 2024
dspy/datasets/__init__.py (2 additions, 1 deletion)
@@ -1,3 +1,4 @@
 from .dataset import Dataset
 from .hotpotqa import HotPotQA
-from .colors import Colors
+from .colors import Colors
+from .dataloader import DataLoader
dspy/datasets/dataloader.py (87 additions, 0 deletions)
@@ -0,0 +1,87 @@
from typing import List, Optional, Union

from datasets import ReadInstruction, load_dataset

from dspy.datasets import Dataset


class DataLoader(Dataset):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _process_dataset(self, dataset, fields: Optional[List[str]] = None):
        # `dataset` is a Hugging Face datasets.Dataset; the split sizes come
        # from the base dspy Dataset's constructor kwargs.
        train_split_size = self.train_size if self.train_size else 0
        dev_split_size = self.dev_size if self.dev_size else 0
        test_split_size = self.test_size if self.test_size else 0

        if isinstance(train_split_size, float):
            train_split_size = int(len(dataset) * train_split_size)

        if train_split_size:
            # Carve the train split off first; the remainder is dev + test.
            tmp_dataset = dataset.train_test_split(test_size=(dev_split_size + test_split_size))
            train_dataset = tmp_dataset["train"]
            dataset = tmp_dataset["test"]

        # Fractional dev/test sizes are resolved against the remaining rows.
        if isinstance(dev_split_size, float):
            dev_split_size = int(len(dataset) * dev_split_size)

        if isinstance(test_split_size, float):
            test_split_size = int(len(dataset) * test_split_size)

        tmp_dataset = dataset.train_test_split(test_size=test_split_size)
        dev_dataset = tmp_dataset["train"]
        test_dataset = tmp_dataset["test"]

        # Materialize only the splits that were requested, keeping just the
        # selected fields from each row.
        if train_split_size:
            self._train = [{field: row[field] for field in fields} for row in train_dataset]

        if dev_split_size:
            self._dev = [{field: row[field] for field in fields} for row in dev_dataset]

        if test_split_size:
            self._test = [{field: row[field] for field in fields} for row in test_dataset]

        self.train_size = None
        self.dev_size = None
        self.test_size = None

    def from_huggingface(
        self,
        dataset_name: str,
        fields: Optional[List[str]] = None,
        splits: Optional[Union[str, List[str]]] = None,
        revision: Optional[str] = None,
    ):
        dataset = None
        if splits:
            if isinstance(splits, str):
                splits = [splits]

            try:
                # Concatenate the requested splits into a single ReadInstruction.
                ri = ReadInstruction(splits[0])
                for split in splits[1:]:
                    ri += ReadInstruction(split)
                dataset = load_dataset(dataset_name, split=ri, revision=revision)
            except Exception as err:
                raise ValueError(
                    "Invalid split name provided. Please provide a valid split name or list of split names."
                ) from err
        else:
            dataset = load_dataset(dataset_name, revision=revision)
            if len(dataset.keys()) == 1:
                split_name = next(iter(dataset.keys()))
                dataset = dataset[split_name]
            else:
                raise ValueError(
                    "No splits provided and the dataset has more than one split. "
                    "Please specify which split(s) to load; multiple splits will be concatenated into a single split."
                )

        if not fields:
            fields = list(dataset.features)

        self._process_dataset(dataset, fields)

    def from_csv(self, file_path: str, fields: Optional[List[str]] = None):
        # load_dataset("csv", ...) returns a DatasetDict with a single "train" split.
        dataset = load_dataset("csv", data_files=file_path)["train"]
Collaborator: Just curious why you keep ["train"] here. Wondering if anyone has datasets not yet sorted into train/dev/test who may want to use this.

Collaborator (Author): The idea was to support a single CSV and create the splits from it. file_path would need to be a dict in order to support multiple CSVs, one per split. Though it's something we can iterate on!
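For illustration, a minimal sketch of what dict-based multi-CSV loading might look like, relying on datasets' data_files mapping (the file names here are hypothetical, and this is not part of this PR):

from datasets import load_dataset

# Hypothetical: one CSV per split, mapped through `data_files`.
splits = load_dataset(
    "csv",
    data_files={"train": "train.csv", "validation": "dev.csv", "test": "test.csv"},
)
train_rows = splits["train"]
dev_rows = splits["validation"]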


        if not fields:
            fields = list(dataset.features)

        self._process_dataset(dataset, fields)
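A minimal usage sketch of the new loader, assuming the base dspy Dataset constructor accepts train_size/dev_size/test_size keyword arguments (which is how _process_dataset reads them); the dataset name, file name, and field names below are illustrative:

from dspy.datasets import DataLoader

# Split 80/10/10; float sizes are resolved as fractions of the dataset length.
dl = DataLoader(train_size=0.8, dev_size=0.1, test_size=0.1)
dl.from_huggingface("squad", fields=["question", "answers"], splits="train")

# Or build splits from a single local CSV (hypothetical file and columns).
dl_csv = DataLoader(train_size=0.7, dev_size=0.2, test_size=0.1)
dl_csv.from_csv("qa_pairs.csv", fields=["question", "answer"])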