Robustness Gym is a Python evaluation toolkit for natural language processing.
- Examples & tutorials
- More documentation
- Contributing guidelines
Robustness Gym is being developed to address challenges in evaluating machine learning models today. You can read more about the ideas underlying Robustness Gym in our paper on arXiv. We also have a website.
The Robustness Gym project is an ongoing collaboration between Stanford Hazy Research, Salesforce Research, and UNC Chapel Hill.
Note: Robustness Gym is in alpha, so expect frequent updates in the coming weeks and months. Reach out to kgoel [at] cs [dot] stanford [dot] edu if you'd like to become an active contributor, or if you work on an interesting NLP task that you'd like to see supported. Feel free to raise issues on GitHub for bugs/feature requests.
pip install robustnessgym
# robustnessgym.Dataset wraps datasets.Dataset
from robustnessgym import Dataset
# Use Dataset.load_dataset(..) exactly like datasets.load_dataset(..)
dataset = Dataset.load_dataset('boolq')
# Optionally pass a split expression, e.g. just the first 10 training examples
dataset = Dataset.load_dataset('boolq', split='train[:10]')
# Get a dataset
from robustnessgym import Dataset
dataset = Dataset.load_dataset('boolq')
# Run the Spacy pipeline
from robustnessgym import Spacy
spacy = Spacy()
# .. on the 'question' column of the dataset
# (calling the op returns the dataset with the Spacy results cached on it,
# so they can be retrieved later without recomputation)
dataset = spacy(batch_or_dataset=dataset,
columns=['question'])
# Run the Stanza pipeline
from robustnessgym import Stanza
stanza = Stanza()
# .. on both the question and passage columns of a batch
# NOTE(review): dataset[:32] yields a batch (dict of columns); the op accepts
# either a full dataset or a batch via `batch_or_dataset` — confirm in docs
dataset = stanza(batch_or_dataset=dataset[:32],
columns=['question', 'passage'])
# .. use any of the other built-in operations in Robustness Gym!
# Or, create your own CachedOperation
from robustnessgym import CachedOperation, Identifier
from robustnessgym.core.decorators import singlecolumn
# Write a silly function that operates on a single column of a batch
@singlecolumn
def silly_fn(batch, columns):
    """
    Capitalize text in the specified column of the batch.

    Args:
        batch: dict-like mapping of column names to lists of per-example
            values (a batch column is iterated below, so it is a sequence).
        columns: list containing the single column name to process
            (the @singlecolumn decorator enforces exactly one column).

    Returns:
        List of capitalized strings, one per example in the batch.
    """
    column_name = columns[0]
    # Validate the *elements* of the column: the column itself is a list of
    # per-example values, so checking the container against `str` (as the
    # original code did) could never succeed on a real batch.
    assert all(isinstance(text, str) for text in batch[column_name]), \
        "Must apply to text column."
    return [text.capitalize() for text in batch[column_name]]
# Wrap the silly function in a CachedOperation
# The Identifier names the op so its cached outputs can be retrieved later
silly_op = CachedOperation(apply_fn=silly_fn,
identifier=Identifier(_name='SillyOp'))
# Apply it to a dataset
# (the returned dataset carries the op's results, keyed by its identifier)
dataset = silly_op(batch_or_dataset=dataset,
columns=['question'])
from robustnessgym import Spacy, Stanza, CachedOperation
# Take a batch of data
batch = dataset[:32]
# Retrieve the (cached) results of the Spacy CachedOperation
spacy_information = Spacy.retrieve(batch, columns=['question'])
# Retrieve the tokens returned by the Spacy CachedOperation
# (proc_fns post-processes the cached values before they are returned)
tokens = Spacy.retrieve(batch, columns=['question'], proc_fns=Spacy.tokens)
# Retrieve the entities found by the Stanza CachedOperation
entities = Stanza.retrieve(batch, columns=['passage'], proc_fns=Stanza.entities)
# Retrieve the capitalized output of the silly_op
# (generic retrieval: any cached op's output can be looked up by identifier)
capitalizations = CachedOperation.retrieve(batch,
columns=['question'],
identifier=silly_op.identifier)
# Retrieve it directly using the silly_op
capitalizations = silly_op.retrieve(batch, columns=['question'])
# Retrieve the capitalized output and lower-case it during retrieval
capitalizations = silly_op.retrieve(
batch,
columns=['question'],
proc_fns=lambda decoded_batch: [x.lower() for x in decoded_batch]
)
from robustnessgym import Spacy, ScoreSubpopulation
def length(batch, columns):
    """
    Score function: number of Spacy tokens per example.

    Reuses the tokenization previously cached by the Spacy
    CachedOperation instead of re-tokenizing the text.
    """
    col = columns[0]
    # Pull the cached token lists for the target column
    token_lists = Spacy.retrieve(batch, columns, proc_fns=Spacy.tokens)[col]
    # One length per example
    return list(map(len, token_lists))
# Create a subpopulation that buckets examples based on length
# Each (low, high) interval defines one slice: examples whose score falls
# in the interval are assigned to that slice
length_subpopulation = ScoreSubpopulation(intervals=[(0, 10), (10, 20)],
score_fn=length)
dataset, slices, membership = length_subpopulation(dataset, columns=['question'])
# dataset is updated with slice information
# slices is a list of 2 Slice objects (one per interval)
# membership is a matrix of shape (n x 2): one row per example, one column
# per slice, indicating slice membership