Commit · 2e1c80b
1 Parent(s): 119215e
chore: update readme and auth token
README.md CHANGED

@@ -1,5 +1,5 @@
 ---
-title: Dataset Viber - chat preference
+title: Dataset Viber - chat preference inference endpoints
 emoji: ⚔️
 colorFrom: red
 colorTo: yellow
app.py CHANGED

@@ -5,12 +5,13 @@ from dataset_viber import AnnotatorInterFace
 from datasets import load_dataset
 from huggingface_hub import InferenceClient
 
+# https://huggingface.co/models?inference=warm&pipeline_tag=text-generation&sort=trending
 MODEL_IDS = [
     "meta-llama/Meta-Llama-3.1-8B-Instruct",
     "microsoft/Phi-3-mini-4k-instruct",
     "mistralai/Mistral-7B-Instruct-v0.2"
 ]
-CLIENTS = [InferenceClient(model_id, token=os.environ["
+CLIENTS = [InferenceClient(model_id, token=os.environ["HF_AUTH_TOKEN"]) for model_id in MODEL_IDS]
 
 dataset = load_dataset("argilla/magpie-ultra-v0.1", split="train")
 
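For context on the app.py change: the new line builds one InferenceClient per entry in MODEL_IDS, reading the token from the HF_AUTH_TOKEN environment variable. Below is a minimal sketch of how such a client list might be queried to collect two competing answers for a chat-preference comparison; the generate_pair helper, the prompt handling, and the max_tokens value are illustrative assumptions and are not part of this commit.

# Illustrative sketch only (not from the repo): query two of the clients
# built above so their answers can be compared for preference annotation.
import os
import random

from huggingface_hub import InferenceClient

MODEL_IDS = [
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "microsoft/Phi-3-mini-4k-instruct",
    "mistralai/Mistral-7B-Instruct-v0.2",
]
# Same construction as in the diff: one client per model, token from the env.
CLIENTS = [InferenceClient(model_id, token=os.environ["HF_AUTH_TOKEN"]) for model_id in MODEL_IDS]


def generate_pair(prompt: str) -> tuple[str, str]:
    """Hypothetical helper: ask two randomly chosen models the same prompt."""
    responses = []
    for client in random.sample(CLIENTS, 2):
        completion = client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=512,  # assumed limit, not taken from the repo
        )
        responses.append(completion.choices[0].message.content)
    return responses[0], responses[1]

In this sketch the pair of responses would then be shown side by side (for example through the AnnotatorInterFace imported in app.py) so an annotator can pick the preferred one.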