This repository has been archived by the owner on Dec 16, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 174
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
44 changed files
with
8,972 additions
and
99 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,61 @@ | ||
""" | ||
A specification for defining task cards (derived from model cards). | ||
Motivation: A model's capabilities and limitations are dependent on | ||
the task definition. Thus, it is helpful to separate the information | ||
in the model card that comes from specifically the task itself. | ||
""" | ||
|
||
from typing import Dict, List, Optional, Union | ||
from dataclasses import dataclass | ||
|
||
from allennlp.common.from_params import FromParams | ||
|
||
|
||
@dataclass(frozen=True)
class TaskCard(FromParams):
    """
    Stores metadata describing a single NLP task, analogous to how a
    `ModelCard` describes a model.

    Separating task-level information from model-level information lets
    multiple model cards refer to one shared task definition, since a
    model's capabilities and limitations depend on how its task is defined.

    # Parameters

    id : `str`
        Unique identifier for the task.
        Example: `"rc"` for reading comprehension.
    name : `str`, optional
        Human-readable display name of the task.
    description : `str`, optional
        Prose description of what the task is.
        Example: "Textual Entailment (TE) is the task of predicting whether,
        for a pair of sentences, the facts in the first sentence necessarily
        imply the facts in the second."
    expected_inputs : `str`, optional
        Description of all expected inputs and their formats.
        Example (reading comprehension): Passage (text string),
        Question (text string).
    expected_outputs : `str`, optional
        Description of all expected outputs and their formats.
        Example (reading comprehension): Answer span (start token position
        and end token position).
    scope_and_limitations : `str`, optional
        Discussion of the task's scope as defined, and any limitations.
        Example: "The Textual Entailment task is in some sense "NLP-complete",
        and you should not expect any current model to cover every possible
        aspect of entailment."
    examples : `Union[List[Dict[str, str]], Dict[str, List[Dict[str, str]]]]`, optional
        Examples for the task, either a flat list of dicts keyed by the
        `expected_inputs`, or a mapping from a category name to such a list.
        Example (textual entailment):
        [{"premise": "A handmade djembe was on display at the Smithsonian.",
          "hypothesis": "Visitors could see the djembe."}]
    """

    # NOTE: field order is part of the positional-construction interface
    # of this dataclass; do not reorder.
    id: str
    name: Optional[str] = None
    description: Optional[str] = None
    expected_inputs: Optional[str] = None
    expected_outputs: Optional[str] = None
    scope_and_limitations: Optional[str] = None
    examples: Optional[Union[List[Dict[str, str]], Dict[str, List[Dict[str, str]]]]] = None
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
69 changes: 68 additions & 1 deletion
69
allennlp_models/modelcards/pair-classification-decomposable-attention-elmo.json
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,73 @@ | ||
{ | ||
"id": "pair-classification-decomposable-attention-elmo", | ||
"registered_model_name": "decomposable_attention", | ||
"registered_predictor_name": "textual_entailment", | ||
"display_name": "ELMo-based Decomposable Attention", | ||
"archive_file": "decomposable-attention-elmo-2020.04.09.tar.gz" | ||
"task_id": "textual_entailment", | ||
"archive_file": "decomposable-attention-elmo-2020.04.09.tar.gz", | ||
"model_details": { | ||
"description": "This `Model` implements the Decomposable Attention model described in [A Decomposable | ||
Attention Model for Natural Language Inference](https://api.semanticscholar.org/CorpusID:8495258) | ||
by Parikh et al., 2016, with some optional enhancements before the decomposable attention | ||
actually happens. Parikh's original model allowed for computing an \"intra-sentence\" attention | ||
before doing the decomposable entailment step. We generalize this to any | ||
[`Seq2SeqEncoder`](../modules/seq2seq_encoders/seq2seq_encoder.md) that can be applied to | ||
the premise and/or the hypothesis before computing entailment. | ||
|
||
The basic outline of this model is to get an embedded representation of each word in the | ||
premise and hypothesis, align words between the two, compare the aligned phrases, and make a | ||
final entailment decision based on this aggregated comparison. Each step in this process uses | ||
a feedforward network to modify the representation. | ||
|
||
This model uses ELMo embeddings.", | ||
"developed_by": "Parikh et al", | ||
"contributed_by": "Dirk Groeneveld", | ||
"date": "2020-04-09", | ||
"version": "1", | ||
"model_type": "Seq2Seq", | ||
"paper": "[A Decomposable Attention Model for Natural Language Inference](https://api.semanticscholar.org/CorpusID:8495258)", | ||
"citation": "@article{Parikh2016ADA, | ||
title={A Decomposable Attention Model for Natural Language Inference}, | ||
author={Ankur P. Parikh and Oscar T{\"a}ckstr{\"o}m and Dipanjan Das and Jakob Uszkoreit}, | ||
journal={ArXiv}, | ||
year={2016}, | ||
volume={abs/1606.01933}}", | ||
"license": null, | ||
"contact": "[email protected]", | ||
"training_config": "decomposable_attention_elmo.jsonnet", | ||
}, | ||
"intended_use": { | ||
"primary_uses": null, | ||
"primary_users": null, | ||
"out_of_scope_use_cases": null | ||
}, | ||
"factors": { | ||
"relevant_factors": null, | ||
"evaluation_factors": null | ||
}, | ||
"metrics": { | ||
"model_performance_measures": "Accuracy", | ||
"decision_thresholds": null, | ||
"variation_approaches": null | ||
}, | ||
"evaluation_data": { | ||
"dataset": "[Stanford Natural Language Inference (SNLI)](https://nlp.stanford.edu/projects/snli/) dev set", | ||
"motivation": null, | ||
"preprocessing": null | ||
}, | ||
"training_data": { | ||
"dataset": "[Stanford Natural Language Inference (SNLI)](https://nlp.stanford.edu/projects/snli/) train set", | ||
"motivation": null, | ||
"preprocessing": null | ||
}, | ||
"quantitative_analyses": { | ||
"unitary_results": null, | ||
"intersectional_results": null | ||
}, | ||
"ethical_considerations": { | ||
"ethical_considerations": null | ||
}, | ||
"caveats_and_recommendations": { | ||
"caveats_and_recommendations": null | ||
} | ||
} |
Oops, something went wrong.