Update README.md
README.md CHANGED
````diff
@@ -52,10 +52,11 @@ T5 model expects a task related prefix: since it is a paraphrasing task, we will
 
 ```python
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+import torch
 
-device = "cuda"
-tokenizer = AutoTokenizer.from_pretrained("Ateeqq/Text-Rewriter-Paraphraser"
-model = AutoModelForSeq2SeqLM.from_pretrained("Ateeqq/Text-Rewriter-Paraphraser"
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+tokenizer = AutoTokenizer.from_pretrained("Ateeqq/Text-Rewriter-Paraphraser")
+model = AutoModelForSeq2SeqLM.from_pretrained("Ateeqq/Text-Rewriter-Paraphraser").to(device)
 
 def generate_title(text):
     input_ids = tokenizer(f'paraphraser: {text}', return_tensors="pt", padding="longest", truncation=True, max_length=64).input_ids.to(device)
````
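For quick reference, here is a minimal end-to-end sketch of how the updated snippet might be used. The device selection, loading, and tokenization lines mirror the diff above; the `generate()` decoding parameters and the example call are illustrative assumptions, since the hunk ends before the rest of the function.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Pick the GPU when available, otherwise fall back to CPU (as in the updated snippet).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained("Ateeqq/Text-Rewriter-Paraphraser")
model = AutoModelForSeq2SeqLM.from_pretrained("Ateeqq/Text-Rewriter-Paraphraser").to(device)

def generate_title(text):
    # The T5-based model expects the task prefix "paraphraser: " before the input.
    input_ids = tokenizer(
        f"paraphraser: {text}",
        return_tensors="pt",
        padding="longest",
        truncation=True,
        max_length=64,
    ).input_ids.to(device)
    # Decoding settings below are assumptions for illustration, not part of this diff.
    outputs = model.generate(input_ids, max_length=64, num_beams=4, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(generate_title("The house will be cleaned by me every Saturday."))
```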