Add multi-turn conversation dataset processing #557

Open · wants to merge 1 commit into base: main
26 changes: 17 additions & 9 deletions src/llama_recipes/configs/datasets.py
@@ -3,32 +3,40 @@

from dataclasses import dataclass


@dataclass
class samsum_dataset:
    dataset: str = "samsum_dataset"
    train_split: str = "train"
    test_split: str = "validation"


@dataclass
class grammar_dataset:
    dataset: str = "grammar_dataset"
    train_split: str = "src/llama_recipes/datasets/grammar_dataset/gtrain_10k.csv"
    test_split: str = "src/llama_recipes/datasets/grammar_dataset/grammar_validation.csv"


@dataclass
class alpaca_dataset:
    dataset: str = "alpaca_dataset"
    train_split: str = "train"
    test_split: str = "val"
    data_path: str = "src/llama_recipes/datasets/alpaca_data.json"


@dataclass
class custom_dataset:
    dataset: str = "custom_dataset"
    file: str = "examples/custom_dataset.py"
    train_split: str = "train"
    test_split: str = "validation"


@dataclass
class sharegpt_dataset:
    dataset: str = "sharegpt_dataset"
    data_path: str = "src/llama_recipes/datasets/sharegpt_dataset.jsonl"
    train_split: str = "train"
    test_split: str = "validation"
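
For reference, ShareGPT_Dataset (added below in sharegpt_dataset.py) reads data_path as JSON Lines: one conversation object per line, with an optional "system" string and a "conversation" list of human/assistant turns. The snippet below is a minimal sketch of producing such a record; the sample text is an illustrative assumption and not data shipped with this PR.

# Hypothetical record layout for sharegpt_dataset.jsonl, inferred from the
# keys ShareGPT_Dataset.__getitem__ reads; the sample content is made up.
import json

example_record = {
    "system": "You are a helpful assistant.",
    "conversation": [
        {"human": "What is the capital of France?", "assistant": "Paris."},
        {"human": "And of Italy?", "assistant": "Rome."},
    ],
}

with open("src/llama_recipes/datasets/sharegpt_dataset.jsonl", "a", encoding="utf8") as f:
    f.write(json.dumps(example_record, ensure_ascii=False) + "\n")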
3 changes: 2 additions & 1 deletion src/llama_recipes/datasets/__init__.py
@@ -3,4 +3,5 @@

from llama_recipes.datasets.grammar_dataset.grammar_dataset import get_dataset as get_grammar_dataset
from llama_recipes.datasets.alpaca_dataset import InstructionDataset as get_alpaca_dataset
from llama_recipes.datasets.samsum_dataset import get_preprocessed_samsum as get_samsum_dataset
from llama_recipes.datasets.sharegpt_dataset import ShareGPT_Dataset as sharegpt_dataset
77 changes: 77 additions & 0 deletions src/llama_recipes/datasets/sharegpt_dataset.py
@@ -0,0 +1,77 @@
# Reference code: https://github.com/yangjianxin1/Firefly/blob/2cbefd968391e024592fb89e058929cf118af071/component/dataset.py#L6
import json

from torch.utils.data import Dataset

# Llama 3 chat-template fragments used to assemble each training example.
SYSTEM_FORMAT = '<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{content}<|eot_id|>\n'
USER_FORMAT = '<|start_header_id|>user<|end_header_id|>\n\n{content}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n'
ASSISTANT_FORMAT = '{content}<|eot_id|>\n'


class ShareGPT_Dataset(Dataset):

    def __init__(self, dataset_config, tokenizer, partition="train"):
        self.tokenizer = tokenizer
        self.system_format = SYSTEM_FORMAT
        self.user_format = USER_FORMAT
        self.assistant_format = ASSISTANT_FORMAT
        self.defaults_system = None

        # Each line of the data file is one JSON-encoded conversation.
        with open(dataset_config.data_path, 'r', encoding='utf8') as f:
            data_list = f.readlines()
        self.data_list = data_list

        # The first 200 lines are held out as the validation split.
        if partition == "train":
            self.data_list = self.data_list[200:]
        else:
            self.data_list = self.data_list[:200]

    def __len__(self):
        return len(self.data_list)

    def __getitem__(self, index):
        # Positions labeled IGNORE_INDEX are skipped by the cross-entropy loss,
        # so only assistant responses contribute to the training signal.
        IGNORE_INDEX = -100
        data = json.loads(self.data_list[index])
        input_ids, labels = [], []

        # Optional system prompt; its tokens are fully masked in the labels.
        if self.system_format is not None:
            system = data['system'].strip() if 'system' in data else self.defaults_system

            if system is not None:
                system_text = self.system_format.format(content=system)
                input_ids = self.tokenizer.encode(system_text, add_special_tokens=False)
                labels = [IGNORE_INDEX] * len(input_ids)

        conversations = data['conversation']

        # Append each turn: user tokens are masked, assistant tokens are learned.
        for conv in conversations:
            human = conv.get('human', "").strip()
            assistant = conv.get('assistant', "").strip()

            human = self.user_format.format(content=human)
            assistant = self.assistant_format.format(content=assistant)

            input_tokens = self.tokenizer.encode(human, add_special_tokens=False)
            output_tokens = self.tokenizer.encode(assistant, add_special_tokens=False)

            input_ids += input_tokens + output_tokens
            labels += [IGNORE_INDEX] * len(input_tokens) + output_tokens

        assert len(input_ids) == len(labels)

        # Drop the final token and make sure the sequence ends with the EOS id,
        # both in the inputs and in the labels.
        input_ids.pop()
        labels.pop()
        input_ids[-1] = self.tokenizer.eos_token_id
        labels[-1] = self.tokenizer.eos_token_id

        attention_mask = [True] * len(input_ids)
        assert len(input_ids) == len(labels) == len(attention_mask)
        inputs = {
            'input_ids': input_ids,
            'labels': labels,
            'attention_mask': attention_mask
        }
        return inputs
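
As a sanity check, the dataset can be exercised directly. The sketch below assumes a Llama 3 tokenizer checkpoint and that sharegpt_dataset.jsonl exists at the configured data_path; both are assumptions for illustration, not part of this PR.

# Minimal sketch: instantiate the dataset and inspect the label masking.
from transformers import AutoTokenizer

from llama_recipes.configs.datasets import sharegpt_dataset
from llama_recipes.datasets.sharegpt_dataset import ShareGPT_Dataset

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")  # assumed checkpoint
config = sharegpt_dataset()  # default data_path from configs/datasets.py

train_ds = ShareGPT_Dataset(config, tokenizer, partition="train")
sample = train_ds[0]
supervised = sum(label != -100 for label in sample["labels"])
print(f"{len(sample['input_ids'])} tokens, {supervised} supervised (assistant) tokens")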
4 changes: 3 additions & 1 deletion src/llama_recipes/utils/dataset_utils.py
@@ -11,6 +11,7 @@
    get_grammar_dataset,
    get_alpaca_dataset,
    get_samsum_dataset,
    sharegpt_dataset
)


@@ -54,11 +55,12 @@ def get_custom_dataset(dataset_config, tokenizer, split: str):
"grammar_dataset": get_grammar_dataset,
"samsum_dataset": get_samsum_dataset,
"custom_dataset": get_custom_dataset,
"sharegpt_dataset": sharegpt_dataset
}


def get_preprocessed_dataset(
    tokenizer, dataset_config, split: str = "train"
) -> torch.utils.data.Dataset:
    if not dataset_config.dataset in DATASET_PREPROC:
        raise NotImplementedError(f"{dataset_config.dataset} is not (yet) implemented")
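
Because DATASET_PREPROC maps "sharegpt_dataset" to the ShareGPT_Dataset class itself, the shared entry point should construct the dataset with (dataset_config, tokenizer, split), the same way the other entries are invoked. The sketch below illustrates that path; the tokenizer checkpoint and the exact split handling inside get_preprocessed_dataset (not shown in this diff) are assumptions.

# Sketch of reaching the new dataset through the shared entry point.
from transformers import AutoTokenizer

from llama_recipes.configs.datasets import sharegpt_dataset
from llama_recipes.utils.dataset_utils import get_preprocessed_dataset

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")  # assumed checkpoint
dataset_config = sharegpt_dataset()

# "sharegpt_dataset" resolves to the ShareGPT_Dataset class in DATASET_PREPROC,
# so this call constructs the dataset rather than calling a preprocessing function.
train_dataset = get_preprocessed_dataset(tokenizer, dataset_config, split="train")
print(len(train_dataset))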